runtime

Imports

Imports #

"internal/abi"
"internal/goarch"
"unsafe"
"unsafe"
"internal/abi"
"internal/goarch"
"unsafe"
"internal/abi"
"internal/runtime/atomic"
"unsafe"
"unsafe"
"internal/goarch"
"internal/goos"
"unsafe"
"unsafe"
"internal/abi"
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
"internal/cpu"
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
_ "unsafe"
_ "unsafe"
"internal/abi"
"unsafe"
"internal/abi"
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/sys"
"internal/stringslite"
"unsafe"
"unsafe"
"internal/abi"
"internal/goarch"
"internal/stringslite"
"unsafe"
"unsafe"
"unsafe"
"internal/abi"
_ "unsafe"
"unsafe"
"unsafe"
"unsafe"
"unsafe"
"internal/cpu"
"unsafe"
"internal/runtime/atomic"
"unsafe"
"internal/runtime/atomic"
"unsafe"
"unsafe"
"internal/abi"
"internal/goarch"
"internal/goexperiment"
"internal/runtime/atomic"
"unsafe"
"internal/abi"
"unsafe"
"C"
"unsafe"
"internal/abi"
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
"internal/abi"
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/math"
"internal/runtime/sys"
"unsafe"
"unsafe"
"internal/abi"
"internal/cpu"
"internal/goarch"
"internal/goos"
"internal/runtime/atomic"
"internal/runtime/exithook"
"internal/runtime/sys"
"internal/stringslite"
"unsafe"
_ "unsafe"
"internal/abi"
"internal/goarch"
"internal/runtime/sys"
"unsafe"
"unsafe"
"internal/abi"
"internal/goarch"
"unsafe"
"internal/abi"
"internal/cpu"
"internal/goarch"
"internal/goos"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
"unsafe"
"unsafe"
"internal/abi"
"internal/runtime/maps"
"unsafe"
"internal/cpu"
"internal/runtime/sys"
"unsafe"
"internal/goarch"
"unsafe"
"unsafe"
_ "unsafe"
"internal/abi"
"internal/goarch"
"unsafe"
"unsafe"
"unsafe"
"internal/goarch"
"unsafe"
"internal/goarch"
"unsafe"
"internal/abi"
"internal/runtime/maps"
"unsafe"
"internal/abi"
"internal/goarch"
"unsafe"
"internal/goarch"
"internal/runtime/atomic"
"unsafe"
"unsafe"
"internal/abi"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
"unsafe"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
"internal/abi"
"internal/stringslite"
"unsafe"
"internal/abi"
"internal/goarch"
"unsafe"
"internal/runtime/atomic"
"unsafe"
"internal/abi"
"internal/runtime/sys"
"unsafe"
"unsafe"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
"unsafe"
"internal/runtime/atomic"
"unsafe"
"unsafe"
"unsafe"
_ "unsafe"
"unsafe"
"unsafe"
"internal/goarch"
"internal/goos"
"unsafe"
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
"internal/abi"
"unsafe"
"unsafe"
"internal/runtime/atomic"
"internal/runtime/syscall"
"unsafe"
"internal/abi"
"unsafe"
"internal/cpu"
"internal/runtime/atomic"
"unsafe"
"unsafe"
"unsafe"
"unsafe"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
"unsafe"
"unsafe"
_ "unsafe"
"internal/cpu"
"internal/abi"
"internal/cpu"
"internal/goarch"
"internal/runtime/sys"
"unsafe"
"C"
"internal/abi"
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
"unsafe"
"unsafe"
_ "unsafe"
"internal/runtime/atomic"
"internal/abi"
"internal/goarch"
"unsafe"
"unsafe"
"internal/goarch"
"internal/goos"
"internal/runtime/atomic"
"internal/runtime/math"
"internal/runtime/sys"
"unsafe"
"unsafe"
"internal/abi"
"internal/runtime/sys"
"unsafe"
"unsafe"
"unsafe"
"unsafe"
"internal/cpu"
"unsafe"
"internal/abi"
"internal/runtime/atomic"
"internal/stringslite"
"unsafe"
"internal/abi"
"internal/goarch"
"unsafe"
"unsafe"
"unsafe"
"C"
"internal/abi"
"unsafe"
"internal/cpu"
"internal/goexperiment"
"internal/runtime/atomic"
"unsafe"
"unsafe"
"unsafe"
_ "unsafe"
"C"
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
"internal/runtime/sys"
"unsafe"
"internal/abi"
"unsafe"
"internal/goarch"
"unsafe"
"unsafe"
"unsafe"
"C"
"internal/goarch"
"internal/runtime/atomic"
"unsafe"
"internal/abi"
"internal/runtime/atomic"
"unsafe"
"internal/runtime/math"
"internal/runtime/sys"
"unsafe"
"unsafe"
"unsafe"
"internal/runtime/atomic"
"unsafe"
"internal/cpu"
"unsafe"
"internal/abi"
"internal/runtime/maps"
"internal/runtime/sys"
"unsafe"
"internal/abi"
"internal/goarch"
"internal/runtime/math"
"internal/runtime/sys"
"unsafe"
"internal/abi"
"internal/runtime/sys"
"internal/goarch"
"unsafe"
"internal/runtime/sys"
"unsafe"
"unsafe"
"internal/abi"
"internal/runtime/sys"
"unsafe"
"unsafe"
_ "unsafe"
"internal/abi"
"internal/goarch"
"internal/runtime/sys"
"unsafe"
"internal/abi"
"unsafe"
"internal/coverage/rtcov"
"unsafe"
"internal/cpu"
"internal/runtime/atomic"
"unsafe"
"internal/abi"
"internal/goarch"
"internal/runtime/sys"
"unsafe"
"unsafe"
"internal/runtime/atomic"
"unsafe"
"internal/abi"
"internal/runtime/atomic"
"internal/runtime/math"
"internal/runtime/sys"
"unsafe"
"internal/abi"
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
"internal/cpu"
"internal/goarch"
"internal/runtime/atomic"
"unsafe"
"internal/cpu"
"unsafe"
"structs"
"unsafe"
"internal/abi"
"internal/goarch"
"internal/goexperiment"
"internal/runtime/sys"
"unsafe"
"internal/runtime/sys"
"unsafe"
"internal/abi"
"internal/runtime/maps"
"internal/runtime/sys"
"unsafe"
"internal/abi"
"internal/goarch"
"internal/goexperiment"
"internal/runtime/sys"
"unsafe"
"internal/runtime/sys"
"unsafe"
"internal/runtime/atomic"
"unsafe"
"unsafe"
"unsafe"
"unsafe"
"unsafe"
"unsafe"
"internal/abi"
"internal/bytealg"
"internal/goarch"
"internal/runtime/sys"
"internal/stringslite"
"unsafe"
"unsafe"
"internal/abi"
"internal/goarch"
"unsafe"
"unsafe"
"internal/abi"
"unsafe"
"internal/abi"
"unsafe"
"unsafe"
"internal/goarch"
_ "unsafe"
_ "unsafe"
"unsafe"
"unsafe"
"internal/runtime/atomic"
"unsafe"
"internal/abi"
"internal/abi"
"internal/runtime/atomic"
"unsafe"
"unsafe"
"unsafe"
_ "unsafe"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
"internal/goarch"
"internal/runtime/atomic"
"unsafe"
"unsafe"
"internal/goarch"
"unsafe"
"internal/cpu"
"C"
"internal/abi"
"internal/bytealg"
"internal/runtime/sys"
_ "unsafe"
"internal/runtime/atomic"
"internal/runtime/sys"
"internal/abi"
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/syscall"
"unsafe"
"C"
"internal/runtime/math"
"unsafe"
"internal/abi"
"internal/goarch"
"unsafe"
"internal/goos"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
"internal/goarch"
"internal/runtime/atomic"
"unsafe"
"internal/goarch"
"internal/runtime/atomic"
"unsafe"
_ "unsafe"
"internal/byteorder"
"internal/chacha8rand"
"internal/goarch"
"internal/runtime/math"
"unsafe"
_ "unsafe"
"internal/abi"
"internal/runtime/atomic"
"unsafe"
"internal/abi"
"unsafe"
"internal/abi"
"internal/goarch"
"internal/runtime/sys"
"unsafe"
"unsafe"
"internal/abi"
"internal/goarch"
"unsafe"
"internal/runtime/atomic"
"unsafe"
"internal/runtime/sys"
"internal/runtime/atomic"
"unsafe"
"internal/abi"
"internal/chacha8rand"
"internal/goarch"
"internal/goexperiment"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
_ "unsafe"
"unsafe"
"unsafe"
"internal/stringslite"
"internal/abi"
"internal/goarch"
"unsafe"
"internal/abi"
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
"internal/goarch"
"unsafe"
"unsafe"
"internal/abi"
"internal/runtime/sys"
"unsafe"
"unsafe"
"internal/abi"
"internal/runtime/sys"
"unsafe"
"C"
"internal/abi"
"internal/runtime/sys"
"unsafe"
"internal/goarch"
"unsafe"
"internal/abi"
"internal/goarch"
"internal/runtime/sys"
"unsafe"
"internal/runtime/atomic"
_ "unsafe"
"internal/runtime/atomic"
"unsafe"
"internal/abi"
"internal/goarch"
"internal/runtime/atomic"
"unsafe"
"internal/cpu"
"unsafe"
"internal/abi"
"internal/bytealg"
"internal/goarch"
"internal/runtime/sys"
"unsafe"
"C"
"unsafe"
"unsafe"
"unsafe"
"internal/runtime/atomic"
"unsafe"
"C"
"internal/runtime/atomic"
"unsafe"
"internal/goarch"
"unsafe"
"unsafe"
"internal/runtime/sys"
_ "unsafe"
"internal/abi"
"unsafe"
"internal/bytealg"
"internal/goarch"
"internal/runtime/atomic"
"unsafe"
"unsafe"
"internal/runtime/sys"
"unsafe"
"unsafe"
"C"
"internal/goarch"
"internal/runtime/atomic"
"unsafe"
"internal/abi"
"internal/goarch"
"internal/stringslite"
"internal/runtime/atomic"
"unsafe"
"internal/abi"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
"unsafe"
"unsafe"
"internal/cpu"
"internal/goexperiment"
"internal/runtime/atomic"
_ "unsafe"
"internal/cpu"
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
"unsafe"
"internal/runtime/atomic"
"unsafe"
"internal/abi"
"internal/runtime/maps"
"unsafe"
"internal/goarch"
"unsafe"
"unsafe"
"unsafe"
"unsafe"
"unsafe"
"internal/abi"
"internal/runtime/sys"
"unsafe"
"internal/coverage/rtcov"
"unsafe"
"C"
"unsafe"
"internal/runtime/atomic"
"unsafe"
"internal/goarch"
"unsafe"
"internal/goarch"
"unsafe"
"internal/cpu"
"unsafe"
"internal/runtime/atomic"
"internal/runtime/atomic"
"unsafe"
"unsafe"
"unsafe"
"C"
"internal/cpu"
"internal/runtime/atomic"
"unsafe"
"internal/abi"
"unsafe"
"internal/goarch"
"unsafe"
"internal/runtime/sys"
"unsafe"
"unsafe"
"C"
"internal/abi"
"internal/goarch"
"internal/runtime/sys"
"unsafe"
"unsafe"
"unsafe"
"internal/goarch"
"internal/runtime/sys"
"unsafe"
"internal/runtime/sys"
"unsafe"
"C"
"internal/godebugs"
"unsafe"
"internal/abi"
"unsafe"
"internal/goarch"
"unsafe"
"unsafe"
"unsafe"
"unsafe"
"internal/abi"
"internal/runtime/atomic"
"unsafe"
"internal/abi"
"internal/goarch"
"internal/runtime/atomic"
"unsafe"
"internal/runtime/atomic"
"unsafe"
"internal/runtime/atomic"
"C"
"internal/runtime/atomic"
"unsafe"
"internal/abi"
"internal/goarch"
"internal/profilerecord"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
"unsafe"
"internal/abi"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
"internal/runtime/atomic"
_ "unsafe"
"internal/abi"
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/math"
"internal/runtime/sys"
"unsafe"
"internal/abi"
"unsafe"
"unsafe"
"internal/abi"
"internal/runtime/atomic"
"unsafe"
"internal/abi"
"unsafe"
"internal/abi"
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
"internal/runtime/sys"
"unsafe"
"internal/goarch"
"internal/runtime/sys"
"unsafe"
_ "unsafe"
"unsafe"
"internal/abi"
"internal/goarch"
"unsafe"

Constants & Variables

BUS_ADRALN const #

const BUS_ADRALN = C.BUS_ADRALN

BUS_ADRALN const #

const BUS_ADRALN = C.BUS_ADRALN

BUS_ADRALN const #

const BUS_ADRALN = C.BUS_ADRALN

BUS_ADRALN const #

const BUS_ADRALN = C.BUS_ADRALN

BUS_ADRALN const #

const BUS_ADRALN = C.BUS_ADRALN

BUS_ADRALN const #

const BUS_ADRALN = C.BUS_ADRALN

BUS_ADRALN const #

const BUS_ADRALN = C.BUS_ADRALN

BUS_ADRALN const #

const BUS_ADRALN = *ast.BinaryExpr

BUS_ADRALN const #

const BUS_ADRALN = C.BUS_ADRALN

BUS_ADRERR const #

const BUS_ADRERR = C.BUS_ADRERR

BUS_ADRERR const #

const BUS_ADRERR = C.BUS_ADRERR

BUS_ADRERR const #

const BUS_ADRERR = C.BUS_ADRERR

BUS_ADRERR const #

const BUS_ADRERR = C.BUS_ADRERR

BUS_ADRERR const #

const BUS_ADRERR = C.BUS_ADRERR

BUS_ADRERR const #

const BUS_ADRERR = *ast.BinaryExpr

BUS_ADRERR const #

const BUS_ADRERR = C.BUS_ADRERR

BUS_ADRERR const #

const BUS_ADRERR = C.BUS_ADRERR

BUS_ADRERR const #

const BUS_ADRERR = C.BUS_ADRERR

BUS_OBJERR const #

const BUS_OBJERR = C.BUS_OBJERR

BUS_OBJERR const #

const BUS_OBJERR = C.BUS_OBJERR

BUS_OBJERR const #

const BUS_OBJERR = C.BUS_OBJERR

BUS_OBJERR const #

const BUS_OBJERR = C.BUS_OBJERR

BUS_OBJERR const #

const BUS_OBJERR = C.BUS_OBJERR

BUS_OBJERR const #

const BUS_OBJERR = C.BUS_OBJERR

BUS_OBJERR const #

const BUS_OBJERR = C.BUS_OBJERR

BUS_OBJERR const #

const BUS_OBJERR = C.BUS_OBJERR

BUS_OBJERR const #

const BUS_OBJERR = *ast.BinaryExpr

CLOCK_MONOTONIC const #

const CLOCK_MONOTONIC = C.CLOCK_MONOTONIC

CLOCK_REALTIME const #

const CLOCK_REALTIME = C.CLOCK_REALTIME

CLOCK_THREAD_CPUTIME_ID const #

const CLOCK_THREAD_CPUTIME_ID = C.CLOCK_THREAD_CPUTIME_ID

Compiler const #

Compiler is the name of the compiler toolchain that built the running binary. Known toolchains are: "gc" (also known as cmd/compile) and "gccgo" (the gccgo front end, part of the GCC compiler suite).

const Compiler = "gc"

EAGAIN const #

const EAGAIN = C.EAGAIN

EAGAIN const #

const EAGAIN = C.EAGAIN

EAGAIN const #

const EAGAIN = C.EAGAIN

EAGAIN const #

const EAGAIN = C.EAGAIN

EAGAIN const #

const EAGAIN = C.EAGAIN

EAGAIN const #

const EAGAIN = C.EAGAIN

EAGAIN const #

const EAGAIN = C.EAGAIN

EAGAIN const #

const EAGAIN = C.EAGAIN

EBADF const #

const EBADF = C.EBADF

EBUSY const #

const EBUSY = C.EBUSY

EBUSY const #

const EBUSY = C.EBUSY

EFAULT const #

const EFAULT = C.EFAULT

EFAULT const #

const EFAULT = C.EFAULT

EFAULT const #

const EFAULT = C.EFAULT

EFAULT const #

const EFAULT = C.EFAULT

EFAULT const #

const EFAULT = C.EFAULT

EFAULT const #

const EFAULT = C.EFAULT

EINPROGRESS const #

const EINPROGRESS = C.EINPROGRESS

EINTR const #

const EINTR = C.EINTR

EINTR const #

const EINTR = C.EINTR

EINTR const #

const EINTR = C.EINTR

EINTR const #

const EINTR = C.EINTR

EINTR const #

const EINTR = C.EINTR

EINTR const #

const EINTR = C.EINTR

EINTR const #

const EINTR = C.EINTR

EINTR const #

const EINTR = C.EINTR

ENOMEM const #

const ENOMEM = C.ENOMEM

ENOMEM const #

const ENOMEM = C.ENOMEM

ETIME const #

const ETIME = C.ETIME

ETIMEDOUT const #

const ETIMEDOUT = C.ETIMEDOUT

ETIMEDOUT const #

const ETIMEDOUT = C.ETIMEDOUT

ETIMEDOUT const #

const ETIMEDOUT = C.ETIMEDOUT

ETIMEDOUT const #

const ETIMEDOUT = C.ETIMEDOUT

ETIMEDOUT const #

const ETIMEDOUT = C.ETIMEDOUT

EVFILT_READ const #

const EVFILT_READ = C.EVFILT_READ

EVFILT_READ const #

const EVFILT_READ = C.EVFILT_READ

EVFILT_READ const #

const EVFILT_READ = C.EVFILT_READ

EVFILT_READ const #

const EVFILT_READ = C.EVFILT_READ

EVFILT_READ const #

const EVFILT_READ = C.EVFILT_READ

EVFILT_USER const #

const EVFILT_USER = C.EVFILT_USER

EVFILT_USER const #

const EVFILT_USER = C.EVFILT_USER

EVFILT_USER const #

const EVFILT_USER = C.EVFILT_USER

EVFILT_USER const #

const EVFILT_USER = C.EVFILT_USER

EVFILT_WRITE const #

const EVFILT_WRITE = C.EVFILT_WRITE

EVFILT_WRITE const #

const EVFILT_WRITE = C.EVFILT_WRITE

EVFILT_WRITE const #

const EVFILT_WRITE = C.EVFILT_WRITE

EVFILT_WRITE const #

const EVFILT_WRITE = C.EVFILT_WRITE

EVFILT_WRITE const #

const EVFILT_WRITE = C.EVFILT_WRITE

EV_ADD const #

const EV_ADD = C.EV_ADD

EV_ADD const #

const EV_ADD = C.EV_ADD

EV_ADD const #

const EV_ADD = C.EV_ADD

EV_ADD const #

const EV_ADD = C.EV_ADD

EV_ADD const #

const EV_ADD = C.EV_ADD

EV_CLEAR const #

const EV_CLEAR = C.EV_CLEAR

EV_CLEAR const #

const EV_CLEAR = C.EV_CLEAR

EV_CLEAR const #

const EV_CLEAR = C.EV_CLEAR

EV_CLEAR const #

const EV_CLEAR = C.EV_CLEAR

EV_CLEAR const #

const EV_CLEAR = C.EV_CLEAR

EV_DELETE const #

const EV_DELETE = C.EV_DELETE

EV_DELETE const #

const EV_DELETE = C.EV_DELETE

EV_DELETE const #

const EV_DELETE = C.EV_DELETE

EV_DELETE const #

const EV_DELETE = C.EV_DELETE

EV_DELETE const #

const EV_DELETE = C.EV_DELETE

EV_DISABLE const #

const EV_DISABLE = C.EV_DISABLE

EV_DISABLE const #

const EV_DISABLE = C.EV_DISABLE

EV_DISABLE const #

const EV_DISABLE = C.EV_DISABLE

EV_DISABLE const #

const EV_DISABLE = C.EV_DISABLE

EV_ENABLE const #

const EV_ENABLE = C.EV_ENABLE

EV_ENABLE const #

const EV_ENABLE = C.EV_ENABLE

EV_ENABLE const #

const EV_ENABLE = C.EV_ENABLE

EV_ENABLE const #

const EV_ENABLE = C.EV_ENABLE

EV_EOF const #

const EV_EOF = C.EV_EOF

EV_EOF const #

const EV_EOF = C.EV_EOF

EV_EOF const #

const EV_EOF = C.EV_EOF

EV_EOF const #

const EV_EOF = C.EV_EOF

EV_EOF const #

const EV_EOF = C.EV_EOF

EV_ERROR const #

const EV_ERROR = C.EV_ERROR

EV_ERROR const #

const EV_ERROR = C.EV_ERROR

EV_ERROR const #

const EV_ERROR = C.EV_ERROR

EV_ERROR const #

const EV_ERROR = C.EV_ERROR

EV_ERROR const #

const EV_ERROR = C.EV_ERROR

EV_RECEIPT const #

const EV_RECEIPT = C.EV_RECEIPT

EV_RECEIPT const #

const EV_RECEIPT = C.EV_RECEIPT

EV_RECEIPT const #

const EV_RECEIPT = 0

EWOULDBLOCK const #

const EWOULDBLOCK = C.EWOULDBLOCK

FORK_NOSIGCHLD const #

const FORK_NOSIGCHLD = C.FORK_NOSIGCHLD

FORK_WAITPID const #

const FORK_WAITPID = C.FORK_WAITPID

FPE_FLTDIV const #

const FPE_FLTDIV = C.FPE_FLTDIV

FPE_FLTDIV const #

const FPE_FLTDIV = C.FPE_FLTDIV

FPE_FLTDIV const #

const FPE_FLTDIV = C.FPE_FLTDIV

FPE_FLTDIV const #

const FPE_FLTDIV = C.FPE_FLTDIV

FPE_FLTDIV const #

const FPE_FLTDIV = *ast.BinaryExpr

FPE_FLTDIV const #

const FPE_FLTDIV = C.FPE_FLTDIV

FPE_FLTDIV const #

const FPE_FLTDIV = C.FPE_FLTDIV

FPE_FLTDIV const #

const FPE_FLTDIV = C.FPE_FLTDIV

FPE_FLTDIV const #

const FPE_FLTDIV = C.FPE_FLTDIV

FPE_FLTINV const #

const FPE_FLTINV = C.FPE_FLTINV

FPE_FLTINV const #

const FPE_FLTINV = *ast.BinaryExpr

FPE_FLTINV const #

const FPE_FLTINV = C.FPE_FLTINV

FPE_FLTINV const #

const FPE_FLTINV = C.FPE_FLTINV

FPE_FLTINV const #

const FPE_FLTINV = C.FPE_FLTINV

FPE_FLTINV const #

const FPE_FLTINV = C.FPE_FLTINV

FPE_FLTINV const #

const FPE_FLTINV = C.FPE_FLTINV

FPE_FLTINV const #

const FPE_FLTINV = C.FPE_FLTINV

FPE_FLTINV const #

const FPE_FLTINV = C.FPE_FLTINV

FPE_FLTOVF const #

const FPE_FLTOVF = C.FPE_FLTOVF

FPE_FLTOVF const #

const FPE_FLTOVF = C.FPE_FLTOVF

FPE_FLTOVF const #

const FPE_FLTOVF = C.FPE_FLTOVF

FPE_FLTOVF const #

const FPE_FLTOVF = C.FPE_FLTOVF

FPE_FLTOVF const #

const FPE_FLTOVF = *ast.BinaryExpr

FPE_FLTOVF const #

const FPE_FLTOVF = C.FPE_FLTOVF

FPE_FLTOVF const #

const FPE_FLTOVF = C.FPE_FLTOVF

FPE_FLTOVF const #

const FPE_FLTOVF = C.FPE_FLTOVF

FPE_FLTOVF const #

const FPE_FLTOVF = C.FPE_FLTOVF

FPE_FLTRES const #

const FPE_FLTRES = C.FPE_FLTRES

FPE_FLTRES const #

const FPE_FLTRES = *ast.BinaryExpr

FPE_FLTRES const #

const FPE_FLTRES = C.FPE_FLTRES

FPE_FLTRES const #

const FPE_FLTRES = C.FPE_FLTRES

FPE_FLTRES const #

const FPE_FLTRES = C.FPE_FLTRES

FPE_FLTRES const #

const FPE_FLTRES = C.FPE_FLTRES

FPE_FLTRES const #

const FPE_FLTRES = C.FPE_FLTRES

FPE_FLTRES const #

const FPE_FLTRES = C.FPE_FLTRES

FPE_FLTRES const #

const FPE_FLTRES = C.FPE_FLTRES

FPE_FLTSUB const #

const FPE_FLTSUB = C.FPE_FLTSUB

FPE_FLTSUB const #

const FPE_FLTSUB = C.FPE_FLTSUB

FPE_FLTSUB const #

const FPE_FLTSUB = C.FPE_FLTSUB

FPE_FLTSUB const #

const FPE_FLTSUB = C.FPE_FLTSUB

FPE_FLTSUB const #

const FPE_FLTSUB = C.FPE_FLTSUB

FPE_FLTSUB const #

const FPE_FLTSUB = C.FPE_FLTSUB

FPE_FLTSUB const #

const FPE_FLTSUB = *ast.BinaryExpr

FPE_FLTSUB const #

const FPE_FLTSUB = C.FPE_FLTSUB

FPE_FLTSUB const #

const FPE_FLTSUB = C.FPE_FLTSUB

FPE_FLTUND const #

const FPE_FLTUND = C.FPE_FLTUND

FPE_FLTUND const #

const FPE_FLTUND = C.FPE_FLTUND

FPE_FLTUND const #

const FPE_FLTUND = C.FPE_FLTUND

FPE_FLTUND const #

const FPE_FLTUND = C.FPE_FLTUND

FPE_FLTUND const #

const FPE_FLTUND = C.FPE_FLTUND

FPE_FLTUND const #

const FPE_FLTUND = C.FPE_FLTUND

FPE_FLTUND const #

const FPE_FLTUND = *ast.BinaryExpr

FPE_FLTUND const #

const FPE_FLTUND = C.FPE_FLTUND

FPE_FLTUND const #

const FPE_FLTUND = C.FPE_FLTUND

FPE_INTDIV const #

const FPE_INTDIV = C.FPE_INTDIV

FPE_INTDIV const #

const FPE_INTDIV = C.FPE_INTDIV

FPE_INTDIV const #

const FPE_INTDIV = C.FPE_INTDIV

FPE_INTDIV const #

const FPE_INTDIV = C.FPE_INTDIV

FPE_INTDIV const #

const FPE_INTDIV = *ast.BinaryExpr

FPE_INTDIV const #

const FPE_INTDIV = C.FPE_INTDIV

FPE_INTDIV const #

const FPE_INTDIV = C.FPE_INTDIV

FPE_INTDIV const #

const FPE_INTDIV = C.FPE_INTDIV

FPE_INTDIV const #

const FPE_INTDIV = C.FPE_INTDIV

FPE_INTOVF const #

const FPE_INTOVF = C.FPE_INTOVF

FPE_INTOVF const #

const FPE_INTOVF = C.FPE_INTOVF

FPE_INTOVF const #

const FPE_INTOVF = C.FPE_INTOVF

FPE_INTOVF const #

const FPE_INTOVF = C.FPE_INTOVF

FPE_INTOVF const #

const FPE_INTOVF = *ast.BinaryExpr

FPE_INTOVF const #

const FPE_INTOVF = C.FPE_INTOVF

FPE_INTOVF const #

const FPE_INTOVF = C.FPE_INTOVF

FPE_INTOVF const #

const FPE_INTOVF = C.FPE_INTOVF

FPE_INTOVF const #

const FPE_INTOVF = C.FPE_INTOVF

F_GETFL const #

const F_GETFL = C.F_GETFL

F_SETFL const #

const F_SETFL = C.F_SETFL

GOARCH const #

GOARCH is the running program's architecture target: one of 386, amd64, arm, s390x, and so on.

const GOARCH string = goarch.GOARCH

GOOS const #

GOOS is the running program's operating system target: one of darwin, freebsd, linux, and so on. To view possible combinations of GOOS and GOARCH, run "go tool dist list".

const GOOS string = goos.GOOS

ITIMER_PROF const #

const ITIMER_PROF = C.ITIMER_PROF

ITIMER_PROF const #

const ITIMER_PROF = C.ITIMER_PROF

ITIMER_PROF const #

const ITIMER_PROF = C.ITIMER_PROF

ITIMER_PROF const #

const ITIMER_PROF = C.ITIMER_PROF

ITIMER_PROF const #

const ITIMER_PROF = C.ITIMER_PROF

ITIMER_PROF const #

const ITIMER_PROF = C.ITIMER_PROF

ITIMER_PROF const #

const ITIMER_PROF = C.ITIMER_PROF

ITIMER_PROF const #

const ITIMER_PROF = C.ITIMER_PROF

ITIMER_PROF const #

const ITIMER_PROF = C.ITIMER_PROF

ITIMER_REAL const #

const ITIMER_REAL = C.ITIMER_REAL

ITIMER_REAL const #

const ITIMER_REAL = C.ITIMER_REAL

ITIMER_REAL const #

const ITIMER_REAL = C.ITIMER_REAL

ITIMER_REAL const #

const ITIMER_REAL = C.ITIMER_REAL

ITIMER_REAL const #

const ITIMER_REAL = C.ITIMER_REAL

ITIMER_REAL const #

const ITIMER_REAL = C.ITIMER_REAL

ITIMER_REAL const #

const ITIMER_REAL = C.ITIMER_REAL

ITIMER_REAL const #

const ITIMER_REAL = C.ITIMER_REAL

ITIMER_REAL const #

const ITIMER_REAL = C.ITIMER_REAL

ITIMER_VIRTUAL const #

const ITIMER_VIRTUAL = C.ITIMER_VIRTUAL

ITIMER_VIRTUAL const #

const ITIMER_VIRTUAL = C.ITIMER_VIRTUAL

ITIMER_VIRTUAL const #

const ITIMER_VIRTUAL = C.ITIMER_VIRTUAL

ITIMER_VIRTUAL const #

const ITIMER_VIRTUAL = C.ITIMER_VIRTUAL

ITIMER_VIRTUAL const #

const ITIMER_VIRTUAL = C.ITIMER_VIRTUAL

ITIMER_VIRTUAL const #

const ITIMER_VIRTUAL = C.ITIMER_VIRTUAL

ITIMER_VIRTUAL const #

const ITIMER_VIRTUAL = C.ITIMER_VIRTUAL

ITIMER_VIRTUAL const #

const ITIMER_VIRTUAL = C.ITIMER_VIRTUAL

ITIMER_VIRTUAL const #

const ITIMER_VIRTUAL = C.ITIMER_VIRTUAL

MADV_DONTNEED const #

const MADV_DONTNEED = C.MADV_DONTNEED

MADV_DONTNEED const #

const MADV_DONTNEED = C.MADV_DONTNEED

MADV_DONTNEED const #

const MADV_DONTNEED = C.MADV_DONTNEED

MADV_DONTNEED const #

const MADV_DONTNEED = C.MADV_DONTNEED

MADV_DONTNEED const #

const MADV_DONTNEED = C.MADV_DONTNEED

MADV_DONTNEED const #

const MADV_DONTNEED = C.MADV_DONTNEED

MADV_DONTNEED const #

const MADV_DONTNEED = C.MADV_DONTNEED

MADV_DONTNEED const #

const MADV_DONTNEED = C.MADV_DONTNEED

MADV_DONTNEED const #

const MADV_DONTNEED = C.MADV_DONTNEED

MADV_FREE const #

const MADV_FREE = C.MADV_FREE

MADV_FREE const #

const MADV_FREE = C.MADV_FREE

MADV_FREE const #

const MADV_FREE = C.MADV_FREE

MADV_FREE const #

const MADV_FREE = C.MADV_FREE

MADV_FREE const #

const MADV_FREE = C.MADV_FREE

MADV_FREE const #

const MADV_FREE = C.MADV_FREE

MADV_FREE const #

const MADV_FREE = C.MADV_FREE

MADV_FREE const #

const MADV_FREE = C.MADV_FREE

MADV_FREE_REUSABLE const #

const MADV_FREE_REUSABLE = C.MADV_FREE_REUSABLE

MADV_FREE_REUSE const #

const MADV_FREE_REUSE = C.MADV_FREE_REUSE

MADV_HUGEPAGE const #

const MADV_HUGEPAGE = C.MADV_HUGEPAGE

MADV_HUGEPAGE const #

const MADV_HUGEPAGE = C.MADV_HUGEPAGE

MADV_NOHUGEPAGE const #

const MADV_NOHUGEPAGE = C.MADV_NOHUGEPAGE

MADV_NOHUGEPAGE const #

const MADV_NOHUGEPAGE = C.MADV_NOHUGEPAGE

MAP_ANON const #

const MAP_ANON = C.MAP_ANON

MAP_ANON const #

const MAP_ANON = C.MAP_ANON

MAP_ANON const #

const MAP_ANON = C.MAP_ANONYMOUS

MAP_ANON const #

const MAP_ANON = C.MAP_ANON

MAP_ANON const #

const MAP_ANON = C.MAP_ANONYMOUS

MAP_ANON const #

const MAP_ANON = C.MAP_ANON

MAP_ANON const #

const MAP_ANON = C.MAP_ANON

MAP_ANON const #

const MAP_ANON = C.MAP_ANONYMOUS

MAP_ANON const #

const MAP_ANON = C.MAP_ANON

MAP_FIXED const #

const MAP_FIXED = C.MAP_FIXED

MAP_FIXED const #

const MAP_FIXED = C.MAP_FIXED

MAP_FIXED const #

const MAP_FIXED = C.MAP_FIXED

MAP_FIXED const #

const MAP_FIXED = C.MAP_FIXED

MAP_FIXED const #

const MAP_FIXED = C.MAP_FIXED

MAP_FIXED const #

const MAP_FIXED = C.MAP_FIXED

MAP_FIXED const #

const MAP_FIXED = C.MAP_FIXED

MAP_FIXED const #

const MAP_FIXED = C.MAP_FIXED

MAP_FIXED const #

const MAP_FIXED = C.MAP_FIXED

MAP_PRIVATE const #

const MAP_PRIVATE = C.MAP_PRIVATE

MAP_PRIVATE const #

const MAP_PRIVATE = C.MAP_PRIVATE

MAP_PRIVATE const #

const MAP_PRIVATE = C.MAP_PRIVATE

MAP_PRIVATE const #

const MAP_PRIVATE = C.MAP_PRIVATE

MAP_PRIVATE const #

const MAP_PRIVATE = C.MAP_PRIVATE

MAP_PRIVATE const #

const MAP_PRIVATE = C.MAP_PRIVATE

MAP_PRIVATE const #

const MAP_PRIVATE = C.MAP_PRIVATE

MAP_PRIVATE const #

const MAP_PRIVATE = C.MAP_PRIVATE

MAP_PRIVATE const #

const MAP_PRIVATE = C.MAP_PRIVATE

MAP_SHARED const #

const MAP_SHARED = C.MAP_SHARED

MAP_STACK const #

const MAP_STACK = C.MAP_STACK

MAXHOSTNAMELEN const #

const MAXHOSTNAMELEN = C.MAXHOSTNAMELEN

MemProfileRate var #

MemProfileRate controls the fraction of memory allocations that are recorded and reported in the memory profile. The profiler aims to sample an average of one allocation per MemProfileRate bytes allocated. To include every allocated block in the profile, set MemProfileRate to 1. To turn off profiling entirely, set MemProfileRate to 0. The tools that process the memory profiles assume that the profile rate is constant across the lifetime of the program and equal to the current value. Programs that change the memory profiling rate should do so just once, as early as possible in the execution of the program (for example, at the beginning of main).

var MemProfileRate int = 512 * 1024

NOTE_TRIGGER const #

const NOTE_TRIGGER = C.NOTE_TRIGGER

NOTE_TRIGGER const #

const NOTE_TRIGGER = C.NOTE_TRIGGER

NOTE_TRIGGER const #

const NOTE_TRIGGER = C.NOTE_TRIGGER

NOTE_TRIGGER const #

const NOTE_TRIGGER = C.NOTE_TRIGGER

O_CLOEXEC const #

const O_CLOEXEC = C.O_CLOEXEC

O_CLOEXEC const #

const O_CLOEXEC = C.O_CLOEXEC

O_CLOEXEC const #

const O_CLOEXEC = C.O_CLOEXEC

O_CLOEXEC const #

const O_CLOEXEC = C.O_CLOEXEC

O_CLOEXEC const #

const O_CLOEXEC = C.O_CLOEXEC

O_CLOEXEC const #

const O_CLOEXEC = C.O_CLOEXEC

O_CLOEXEC const #

const O_CLOEXEC = C.O_CLOEXEC

O_CLOEXEC const #

const O_CLOEXEC = C.O_CLOEXEC

O_CREAT const #

const O_CREAT = C.O_CREAT

O_CREAT const #

const O_CREAT = C.O_CREAT

O_CREAT const #

const O_CREAT = C.O_CREAT

O_CREAT const #

const O_CREAT = C.O_CREAT

O_CREAT const #

const O_CREAT = C.O_CREAT

O_NONBLOCK const #

const O_NONBLOCK = C.O_NONBLOCK

O_NONBLOCK const #

const O_NONBLOCK = C.O_NONBLOCK

O_NONBLOCK const #

const O_NONBLOCK = C.O_NONBLOCK

O_NONBLOCK const #

const O_NONBLOCK = C.O_NONBLOCK

O_NONBLOCK const #

const O_NONBLOCK = C.O_NONBLOCK

O_NONBLOCK const #

const O_NONBLOCK = C.O_NONBLOCK

O_NONBLOCK const #

const O_NONBLOCK = C.O_NONBLOCK

O_RDONLY const #

const O_RDONLY = C.O_RDONLY

O_RDONLY const #

const O_RDONLY = C.O_RDONLY

O_RDONLY const #

const O_RDONLY = C.O_RDONLY

O_TRUNC const #

const O_TRUNC = C.O_TRUNC

O_TRUNC const #

const O_TRUNC = C.O_TRUNC

O_TRUNC const #

const O_TRUNC = C.O_TRUNC

O_TRUNC const #

const O_TRUNC = C.O_TRUNC

O_TRUNC const #

const O_TRUNC = C.O_TRUNC

O_WRONLY const #

const O_WRONLY = C.O_WRONLY

O_WRONLY const #

const O_WRONLY = C.O_WRONLY

O_WRONLY const #

const O_WRONLY = C.O_WRONLY

O_WRONLY const #

const O_WRONLY = C.O_WRONLY

O_WRONLY const #

const O_WRONLY = C.O_WRONLY

POLLERR const #

const POLLERR = C.POLLERR

POLLHUP const #

const POLLHUP = C.POLLHUP

POLLIN const #

const POLLIN = C.POLLIN

POLLOUT const #

const POLLOUT = C.POLLOUT

PORT_ALERT_UPDATE const #

const PORT_ALERT_UPDATE = C.PORT_ALERT_UPDATE

PORT_SOURCE_ALERT const #

const PORT_SOURCE_ALERT = C.PORT_SOURCE_ALERT

PORT_SOURCE_FD const #

const PORT_SOURCE_FD = C.PORT_SOURCE_FD

PROT_EXEC const #

const PROT_EXEC = C.PROT_EXEC

PROT_EXEC const #

const PROT_EXEC = C.PROT_EXEC

PROT_EXEC const #

const PROT_EXEC = C.PROT_EXEC

PROT_EXEC const #

const PROT_EXEC = C.PROT_EXEC

PROT_EXEC const #

const PROT_EXEC = C.PROT_EXEC

PROT_EXEC const #

const PROT_EXEC = C.PROT_EXEC

PROT_EXEC const #

const PROT_EXEC = C.PROT_EXEC

PROT_EXEC const #

const PROT_EXEC = C.PROT_EXEC

PROT_EXEC const #

const PROT_EXEC = C.PROT_EXEC

PROT_NONE const #

const PROT_NONE = C.PROT_NONE

PROT_NONE const #

const PROT_NONE = C.PROT_NONE

PROT_NONE const #

const PROT_NONE = C.PROT_NONE

PROT_NONE const #

const PROT_NONE = C.PROT_NONE

PROT_NONE const #

const PROT_NONE = C.PROT_NONE

PROT_NONE const #

const PROT_NONE = C.PROT_NONE

PROT_NONE const #

const PROT_NONE = C.PROT_NONE

PROT_NONE const #

const PROT_NONE = C.PROT_NONE

PROT_NONE const #

const PROT_NONE = C.PROT_NONE

PROT_READ const #

const PROT_READ = C.PROT_READ

PROT_READ const #

const PROT_READ = C.PROT_READ

PROT_READ const #

const PROT_READ = C.PROT_READ

PROT_READ const #

const PROT_READ = C.PROT_READ

PROT_READ const #

const PROT_READ = C.PROT_READ

PROT_READ const #

const PROT_READ = C.PROT_READ

PROT_READ const #

const PROT_READ = C.PROT_READ

PROT_READ const #

const PROT_READ = C.PROT_READ

PROT_READ const #

const PROT_READ = C.PROT_READ

PROT_WRITE const #

const PROT_WRITE = C.PROT_WRITE

PROT_WRITE const #

const PROT_WRITE = C.PROT_WRITE

PROT_WRITE const #

const PROT_WRITE = C.PROT_WRITE

PROT_WRITE const #

const PROT_WRITE = C.PROT_WRITE

PROT_WRITE const #

const PROT_WRITE = C.PROT_WRITE

PROT_WRITE const #

const PROT_WRITE = C.PROT_WRITE

PROT_WRITE const #

const PROT_WRITE = C.PROT_WRITE

PROT_WRITE const #

const PROT_WRITE = C.PROT_WRITE

PROT_WRITE const #

const PROT_WRITE = C.PROT_WRITE

PTHREAD_CREATE_DETACHED const #

const PTHREAD_CREATE_DETACHED = C.PTHREAD_CREATE_DETACHED

PTHREAD_CREATE_DETACHED const #

const PTHREAD_CREATE_DETACHED = C.PTHREAD_CREATE_DETACHED

PTHREAD_CREATE_DETACHED const #

const PTHREAD_CREATE_DETACHED = C.PTHREAD_CREATE_DETACHED

REG_CPSR const #

const REG_CPSR = C._REG_CPSR

REG_CS const #

const REG_CS = C._REG_CS

REG_CS const #

const REG_CS = C._REG_CS

REG_CS const #

const REG_CS = C.REG_CS

REG_DS const #

const REG_DS = C._REG_DS

REG_DS const #

const REG_DS = C._REG_DS

REG_DS const #

const REG_DS = C.REG_DS

REG_EAX const #

const REG_EAX = C._REG_EAX

REG_EBP const #

const REG_EBP = C._REG_EBP

REG_EBX const #

const REG_EBX = C._REG_EBX

REG_ECX const #

const REG_ECX = C._REG_ECX

REG_EDI const #

const REG_EDI = C._REG_EDI

REG_EDX const #

const REG_EDX = C._REG_EDX

REG_EFL const #

const REG_EFL = C._REG_EFL

REG_EIP const #

const REG_EIP = C._REG_EIP

REG_ERR const #

const REG_ERR = C._REG_ERR

REG_ERR const #

const REG_ERR = C.REG_ERR

REG_ERR const #

const REG_ERR = C._REG_ERR

REG_ES const #

const REG_ES = C.REG_ES

REG_ES const #

const REG_ES = C._REG_ES

REG_ES const #

const REG_ES = C._REG_ES

REG_ESI const #

const REG_ESI = C._REG_ESI

REG_ESP const #

const REG_ESP = C._REG_ESP

REG_FS const #

const REG_FS = C.REG_FS

REG_FS const #

const REG_FS = C._REG_FS

REG_FS const #

const REG_FS = C._REG_FS

REG_GS const #

const REG_GS = C.REG_GS

REG_GS const #

const REG_GS = C._REG_GS

REG_GS const #

const REG_GS = C._REG_GS

REG_R0 const #

const REG_R0 = C._REG_R0

REG_R1 const #

const REG_R1 = C._REG_R1

REG_R10 const #

const REG_R10 = C.REG_R10

REG_R10 const #

const REG_R10 = C._REG_R10

REG_R10 const #

const REG_R10 = C._REG_R10

REG_R11 const #

const REG_R11 = C._REG_R11

REG_R11 const #

const REG_R11 = C._REG_R11

REG_R11 const #

const REG_R11 = C.REG_R11

REG_R12 const #

const REG_R12 = C.REG_R12

REG_R12 const #

const REG_R12 = C._REG_R12

REG_R12 const #

const REG_R12 = C._REG_R12

REG_R13 const #

const REG_R13 = C._REG_R13

REG_R13 const #

const REG_R13 = C._REG_R13

REG_R13 const #

const REG_R13 = C.REG_R13

REG_R14 const #

const REG_R14 = C.REG_R14

REG_R14 const #

const REG_R14 = C._REG_R14

REG_R14 const #

const REG_R14 = C._REG_R14

REG_R15 const #

const REG_R15 = C.REG_R15

REG_R15 const #

const REG_R15 = C._REG_R15

REG_R15 const #

const REG_R15 = C._REG_R15

REG_R2 const #

const REG_R2 = C._REG_R2

REG_R3 const #

const REG_R3 = C._REG_R3

REG_R4 const #

const REG_R4 = C._REG_R4

REG_R5 const #

const REG_R5 = C._REG_R5

REG_R6 const #

const REG_R6 = C._REG_R6

REG_R7 const #

const REG_R7 = C._REG_R7

REG_R8 const #

const REG_R8 = C._REG_R8

REG_R8 const #

const REG_R8 = C.REG_R8

REG_R8 const #

const REG_R8 = C._REG_R8

REG_R9 const #

const REG_R9 = C._REG_R9

REG_R9 const #

const REG_R9 = C._REG_R9

REG_R9 const #

const REG_R9 = C.REG_R9

REG_RAX const #

const REG_RAX = C.REG_RAX

REG_RAX const #

const REG_RAX = C._REG_RAX

REG_RBP const #

const REG_RBP = C._REG_RBP

REG_RBP const #

const REG_RBP = C.REG_RBP

REG_RBX const #

const REG_RBX = C._REG_RBX

REG_RBX const #

const REG_RBX = C.REG_RBX

REG_RCX const #

const REG_RCX = C._REG_RCX

REG_RCX const #

const REG_RCX = C.REG_RCX

REG_RDI const #

const REG_RDI = C.REG_RDI

REG_RDI const #

const REG_RDI = C._REG_RDI

REG_RDX const #

const REG_RDX = C.REG_RDX

REG_RDX const #

const REG_RDX = C._REG_RDX

REG_RFLAGS const #

const REG_RFLAGS = C.REG_RFL

REG_RFLAGS const #

const REG_RFLAGS = C._REG_RFLAGS

REG_RIP const #

const REG_RIP = C.REG_RIP

REG_RIP const #

const REG_RIP = C._REG_RIP

REG_RSI const #

const REG_RSI = C._REG_RSI

REG_RSI const #

const REG_RSI = C.REG_RSI

REG_RSP const #

const REG_RSP = C._REG_RSP

REG_RSP const #

const REG_RSP = C.REG_RSP

REG_SS const #

const REG_SS = C.REG_SS

REG_SS const #

const REG_SS = C._REG_SS

REG_SS const #

const REG_SS = C._REG_SS

REG_TRAPNO const #

const REG_TRAPNO = C._REG_TRAPNO

REG_TRAPNO const #

const REG_TRAPNO = C.REG_TRAPNO

REG_TRAPNO const #

const REG_TRAPNO = C._REG_TRAPNO

REG_UESP const #

const REG_UESP = C._REG_UESP

SA_64REGSET const #

const SA_64REGSET = C.SA_64REGSET

SA_ONSTACK const #

const SA_ONSTACK = C.SA_ONSTACK

SA_ONSTACK const #

const SA_ONSTACK = C.SA_ONSTACK

SA_ONSTACK const #

const SA_ONSTACK = C.SA_ONSTACK

SA_ONSTACK const #

const SA_ONSTACK = C.SA_ONSTACK

SA_ONSTACK const #

const SA_ONSTACK = C.SA_ONSTACK

SA_ONSTACK const #

const SA_ONSTACK = C.SA_ONSTACK

SA_ONSTACK const #

const SA_ONSTACK = C.SA_ONSTACK

SA_ONSTACK const #

const SA_ONSTACK = C.SA_ONSTACK

SA_ONSTACK const #

const SA_ONSTACK = C.SA_ONSTACK

SA_RESTART const #

const SA_RESTART = C.SA_RESTART

SA_RESTART const #

const SA_RESTART = C.SA_RESTART

SA_RESTART const #

const SA_RESTART = C.SA_RESTART

SA_RESTART const #

const SA_RESTART = C.SA_RESTART

SA_RESTART const #

const SA_RESTART = C.SA_RESTART

SA_RESTART const #

const SA_RESTART = C.SA_RESTART

SA_RESTART const #

const SA_RESTART = C.SA_RESTART

SA_RESTART const #

const SA_RESTART = C.SA_RESTART

SA_RESTART const #

const SA_RESTART = C.SA_RESTART

SA_RESTORER const #

const SA_RESTORER = C.SA_RESTORER

SA_RESTORER const #

const SA_RESTORER = C.SA_RESTORER

SA_RESTORER const #

const SA_RESTORER = C.SA_RESTORER

SA_RESTORER const #

const SA_RESTORER = 0

SA_SIGINFO const #

const SA_SIGINFO = C.SA_SIGINFO

SA_SIGINFO const #

const SA_SIGINFO = C.SA_SIGINFO

SA_SIGINFO const #

const SA_SIGINFO = C.SA_SIGINFO

SA_SIGINFO const #

const SA_SIGINFO = C.SA_SIGINFO

SA_SIGINFO const #

const SA_SIGINFO = C.SA_SIGINFO

SA_SIGINFO const #

const SA_SIGINFO = C.SA_SIGINFO

SA_SIGINFO const #

const SA_SIGINFO = C.SA_SIGINFO

SA_SIGINFO const #

const SA_SIGINFO = C.SA_SIGINFO

SA_SIGINFO const #

const SA_SIGINFO = C.SA_SIGINFO

SA_USERTRAMP const #

const SA_USERTRAMP = C.SA_USERTRAMP

SEGV_ACCERR const #

const SEGV_ACCERR = C.SEGV_ACCERR

SEGV_ACCERR const #

const SEGV_ACCERR = C.SEGV_ACCERR

SEGV_ACCERR const #

const SEGV_ACCERR = C.SEGV_ACCERR

SEGV_ACCERR const #

const SEGV_ACCERR = C.SEGV_ACCERR

SEGV_ACCERR const #

const SEGV_ACCERR = C.SEGV_ACCERR

SEGV_ACCERR const #

const SEGV_ACCERR = C.SEGV_ACCERR

SEGV_ACCERR const #

const SEGV_ACCERR = C.SEGV_ACCERR

SEGV_ACCERR const #

const SEGV_ACCERR = *ast.BinaryExpr

SEGV_ACCERR const #

const SEGV_ACCERR = C.SEGV_ACCERR

SEGV_MAPERR const #

const SEGV_MAPERR = C.SEGV_MAPERR

SEGV_MAPERR const #

const SEGV_MAPERR = C.SEGV_MAPERR

SEGV_MAPERR const #

const SEGV_MAPERR = C.SEGV_MAPERR

SEGV_MAPERR const #

const SEGV_MAPERR = *ast.BinaryExpr

SEGV_MAPERR const #

const SEGV_MAPERR = C.SEGV_MAPERR

SEGV_MAPERR const #

const SEGV_MAPERR = C.SEGV_MAPERR

SEGV_MAPERR const #

const SEGV_MAPERR = C.SEGV_MAPERR

SEGV_MAPERR const #

const SEGV_MAPERR = C.SEGV_MAPERR

SEGV_MAPERR const #

const SEGV_MAPERR = C.SEGV_MAPERR

SIGABRT const #

const SIGABRT = C.SIGABRT

SIGABRT const #

const SIGABRT = C.SIGABRT

SIGABRT const #

const SIGABRT = C.SIGABRT

SIGABRT const #

const SIGABRT = C.SIGABRT

SIGABRT const #

const SIGABRT = C.SIGABRT

SIGABRT const #

const SIGABRT = C.SIGABRT

SIGABRT const #

const SIGABRT = C.SIGABRT

SIGABRT const #

const SIGABRT = C.SIGABRT

SIGABRT const #

const SIGABRT = C.SIGABRT

SIGALRM const #

const SIGALRM = C.SIGALRM

SIGALRM const #

const SIGALRM = C.SIGALRM

SIGALRM const #

const SIGALRM = C.SIGALRM

SIGALRM const #

const SIGALRM = C.SIGALRM

SIGALRM const #

const SIGALRM = C.SIGALRM

SIGALRM const #

const SIGALRM = C.SIGALRM

SIGALRM const #

const SIGALRM = C.SIGALRM

SIGALRM const #

const SIGALRM = C.SIGALRM

SIGALRM const #

const SIGALRM = C.SIGALRM

SIGBUS const #

const SIGBUS = C.SIGBUS

SIGBUS const #

const SIGBUS = C.SIGBUS

SIGBUS const #

const SIGBUS = C.SIGBUS

SIGBUS const #

const SIGBUS = C.SIGBUS

SIGBUS const #

const SIGBUS = C.SIGBUS

SIGBUS const #

const SIGBUS = C.SIGBUS

SIGBUS const #

const SIGBUS = C.SIGBUS

SIGBUS const #

const SIGBUS = C.SIGBUS

SIGBUS const #

const SIGBUS = C.SIGBUS

SIGCHLD const #

const SIGCHLD = C.SIGCHLD

SIGCHLD const #

const SIGCHLD = C.SIGCHLD

SIGCHLD const #

const SIGCHLD = C.SIGCHLD

SIGCHLD const #

const SIGCHLD = C.SIGCHLD

SIGCHLD const #

const SIGCHLD = C.SIGCHLD

SIGCHLD const #

const SIGCHLD = C.SIGCHLD

SIGCHLD const #

const SIGCHLD = C.SIGCHLD

SIGCHLD const #

const SIGCHLD = C.SIGCHLD

SIGCHLD const #

const SIGCHLD = C.SIGCHLD

SIGCONT const #

const SIGCONT = C.SIGCONT

SIGCONT const #

const SIGCONT = C.SIGCONT

SIGCONT const #

const SIGCONT = C.SIGCONT

SIGCONT const #

const SIGCONT = C.SIGCONT

SIGCONT const #

const SIGCONT = C.SIGCONT

SIGCONT const #

const SIGCONT = C.SIGCONT

SIGCONT const #

const SIGCONT = C.SIGCONT

SIGCONT const #

const SIGCONT = C.SIGCONT

SIGCONT const #

const SIGCONT = C.SIGCONT

SIGEMT const #

const SIGEMT = C.SIGEMT

SIGEMT const #

const SIGEMT = C.SIGEMT

SIGEMT const #

const SIGEMT = C.SIGEMT

SIGEMT const #

const SIGEMT = C.SIGEMT

SIGEMT const #

const SIGEMT = C.SIGEMT

SIGEMT const #

const SIGEMT = C.SIGEMT

SIGEV_THREAD_ID const #

const SIGEV_THREAD_ID = C.SIGEV_THREAD_ID

SIGFPE const #

const SIGFPE = C.SIGFPE

SIGFPE const #

const SIGFPE = C.SIGFPE

SIGFPE const #

const SIGFPE = C.SIGFPE

SIGFPE const #

const SIGFPE = C.SIGFPE

SIGFPE const #

const SIGFPE = C.SIGFPE

SIGFPE const #

const SIGFPE = C.SIGFPE

SIGFPE const #

const SIGFPE = C.SIGFPE

SIGFPE const #

const SIGFPE = C.SIGFPE

SIGFPE const #

const SIGFPE = C.SIGFPE

SIGHUP const #

const SIGHUP = C.SIGHUP

SIGHUP const #

const SIGHUP = C.SIGHUP

SIGHUP const #

const SIGHUP = C.SIGHUP

SIGHUP const #

const SIGHUP = C.SIGHUP

SIGHUP const #

const SIGHUP = C.SIGHUP

SIGHUP const #

const SIGHUP = C.SIGHUP

SIGHUP const #

const SIGHUP = C.SIGHUP

SIGHUP const #

const SIGHUP = C.SIGHUP

SIGHUP const #

const SIGHUP = C.SIGHUP

SIGILL const #

const SIGILL = C.SIGILL

SIGILL const #

const SIGILL = C.SIGILL

SIGILL const #

const SIGILL = C.SIGILL

SIGILL const #

const SIGILL = C.SIGILL

SIGILL const #

const SIGILL = C.SIGILL

SIGILL const #

const SIGILL = C.SIGILL

SIGILL const #

const SIGILL = C.SIGILL

SIGILL const #

const SIGILL = C.SIGILL

SIGILL const #

const SIGILL = C.SIGILL

SIGINFO const #

const SIGINFO = C.SIGINFO

SIGINFO const #

const SIGINFO = C.SIGINFO

SIGINFO const #

const SIGINFO = C.SIGINFO

SIGINFO const #

const SIGINFO = C.SIGINFO

SIGINFO const #

const SIGINFO = C.SIGINFO

SIGINT const #

const SIGINT = C.SIGINT

SIGINT const #

const SIGINT = C.SIGINT

SIGINT const #

const SIGINT = C.SIGINT

SIGINT const #

const SIGINT = C.SIGINT

SIGINT const #

const SIGINT = C.SIGINT

SIGINT const #

const SIGINT = C.SIGINT

SIGINT const #

const SIGINT = C.SIGINT

SIGINT const #

const SIGINT = C.SIGINT

SIGINT const #

const SIGINT = C.SIGINT

SIGIO const #

const SIGIO = C.SIGIO

SIGIO const #

const SIGIO = C.SIGIO

SIGIO const #

const SIGIO = C.SIGIO

SIGIO const #

const SIGIO = C.SIGIO

SIGIO const #

const SIGIO = C.SIGIO

SIGIO const #

const SIGIO = C.SIGIO

SIGIO const #

const SIGIO = C.SIGIO

SIGIO const #

const SIGIO = C.SIGIO

SIGIO const #

const SIGIO = C.SIGIO

SIGKILL const #

const SIGKILL = C.SIGKILL

SIGKILL const #

const SIGKILL = C.SIGKILL

SIGKILL const #

const SIGKILL = C.SIGKILL

SIGKILL const #

const SIGKILL = C.SIGKILL

SIGKILL const #

const SIGKILL = C.SIGKILL

SIGKILL const #

const SIGKILL = C.SIGKILL

SIGKILL const #

const SIGKILL = C.SIGKILL

SIGKILL const #

const SIGKILL = C.SIGKILL

SIGKILL const #

const SIGKILL = C.SIGKILL

SIGPIPE const #

const SIGPIPE = C.SIGPIPE

SIGPIPE const #

const SIGPIPE = C.SIGPIPE

SIGPIPE const #

const SIGPIPE = C.SIGPIPE

SIGPIPE const #

const SIGPIPE = C.SIGPIPE

SIGPIPE const #

const SIGPIPE = C.SIGPIPE

SIGPIPE const #

const SIGPIPE = C.SIGPIPE

SIGPIPE const #

const SIGPIPE = C.SIGPIPE

SIGPIPE const #

const SIGPIPE = C.SIGPIPE

SIGPIPE const #

const SIGPIPE = C.SIGPIPE

SIGPROF const #

const SIGPROF = C.SIGPROF

SIGPROF const #

const SIGPROF = C.SIGPROF

SIGPROF const #

const SIGPROF = C.SIGPROF

SIGPROF const #

const SIGPROF = C.SIGPROF

SIGPROF const #

const SIGPROF = C.SIGPROF

SIGPROF const #

const SIGPROF = C.SIGPROF

SIGPROF const #

const SIGPROF = C.SIGPROF

SIGPROF const #

const SIGPROF = C.SIGPROF

SIGPROF const #

const SIGPROF = C.SIGPROF

SIGPWR const #

const SIGPWR = C.SIGPWR

SIGPWR const #

const SIGPWR = C.SIGPWR

SIGPWR const #

const SIGPWR = C.SIGPWR

SIGQUIT const #

const SIGQUIT = C.SIGQUIT

SIGQUIT const #

const SIGQUIT = C.SIGQUIT

SIGQUIT const #

const SIGQUIT = C.SIGQUIT

SIGQUIT const #

const SIGQUIT = C.SIGQUIT

SIGQUIT const #

const SIGQUIT = C.SIGQUIT

SIGQUIT const #

const SIGQUIT = C.SIGQUIT

SIGQUIT const #

const SIGQUIT = C.SIGQUIT

SIGQUIT const #

const SIGQUIT = C.SIGQUIT

SIGQUIT const #

const SIGQUIT = C.SIGQUIT

SIGRTMIN const #

const SIGRTMIN = C.SIGRTMIN

SIGSEGV const #

const SIGSEGV = C.SIGSEGV

SIGSEGV const #

const SIGSEGV = C.SIGSEGV

SIGSEGV const #

const SIGSEGV = C.SIGSEGV

SIGSEGV const #

const SIGSEGV = C.SIGSEGV

SIGSEGV const #

const SIGSEGV = C.SIGSEGV

SIGSEGV const #

const SIGSEGV = C.SIGSEGV

SIGSEGV const #

const SIGSEGV = C.SIGSEGV

SIGSEGV const #

const SIGSEGV = C.SIGSEGV

SIGSEGV const #

const SIGSEGV = C.SIGSEGV

SIGSTKFLT const #

const SIGSTKFLT = C.SIGSTKFLT

SIGSTKFLT const #

const SIGSTKFLT = C.SIGSTKFLT

SIGSTKFLT const #

const SIGSTKFLT = C.SIGSTKFLT

SIGSTOP const #

const SIGSTOP = C.SIGSTOP

SIGSTOP const #

const SIGSTOP = C.SIGSTOP

SIGSTOP const #

const SIGSTOP = C.SIGSTOP

SIGSTOP const #

const SIGSTOP = C.SIGSTOP

SIGSTOP const #

const SIGSTOP = C.SIGSTOP

SIGSTOP const #

const SIGSTOP = C.SIGSTOP

SIGSTOP const #

const SIGSTOP = C.SIGSTOP

SIGSTOP const #

const SIGSTOP = C.SIGSTOP

SIGSTOP const #

const SIGSTOP = C.SIGSTOP

SIGSYS const #

const SIGSYS = C.SIGSYS

SIGSYS const #

const SIGSYS = C.SIGSYS

SIGSYS const #

const SIGSYS = C.SIGSYS

SIGSYS const #

const SIGSYS = C.SIGSYS

SIGSYS const #

const SIGSYS = C.SIGSYS

SIGSYS const #

const SIGSYS = C.SIGSYS

SIGSYS const #

const SIGSYS = C.SIGSYS

SIGSYS const #

const SIGSYS = C.SIGSYS

SIGSYS const #

const SIGSYS = C.SIGSYS

SIGTERM const #

const SIGTERM = C.SIGTERM

SIGTERM const #

const SIGTERM = C.SIGTERM

SIGTERM const #

const SIGTERM = C.SIGTERM

SIGTERM const #

const SIGTERM = C.SIGTERM

SIGTERM const #

const SIGTERM = C.SIGTERM

SIGTERM const #

const SIGTERM = C.SIGTERM

SIGTRAP const #

const SIGTRAP = C.SIGTRAP

SIGTRAP const #

const SIGTRAP = C.SIGTRAP

SIGTRAP const #

const SIGTRAP = C.SIGTRAP

SIGTRAP const #

const SIGTRAP = C.SIGTRAP

SIGTRAP const #

const SIGTRAP = C.SIGTRAP

SIGTRAP const #

const SIGTRAP = C.SIGTRAP

SIGTRAP const #

const SIGTRAP = C.SIGTRAP

SIGTRAP const #

const SIGTRAP = C.SIGTRAP

SIGTRAP const #

const SIGTRAP = C.SIGTRAP

SIGTSTP const #

const SIGTSTP = C.SIGTSTP

SIGTSTP const #

const SIGTSTP = C.SIGTSTP

SIGTSTP const #

const SIGTSTP = C.SIGTSTP

SIGTSTP const #

const SIGTSTP = C.SIGTSTP

SIGTSTP const #

const SIGTSTP = C.SIGTSTP

SIGTSTP const #

const SIGTSTP = C.SIGTSTP

SIGTSTP const #

const SIGTSTP = C.SIGTSTP

SIGTSTP const #

const SIGTSTP = C.SIGTSTP

SIGTSTP const #

const SIGTSTP = C.SIGTSTP

SIGTTIN const #

const SIGTTIN = C.SIGTTIN

SIGTTIN const #

const SIGTTIN = C.SIGTTIN

SIGTTIN const #

const SIGTTIN = C.SIGTTIN

SIGTTIN const #

const SIGTTIN = C.SIGTTIN

SIGTTIN const #

const SIGTTIN = C.SIGTTIN

SIGTTIN const #

const SIGTTIN = C.SIGTTIN

SIGTTIN const #

const SIGTTIN = C.SIGTTIN

SIGTTIN const #

const SIGTTIN = C.SIGTTIN

SIGTTIN const #

const SIGTTIN = C.SIGTTIN

SIGTTOU const #

const SIGTTOU = C.SIGTTOU

SIGTTOU const #

const SIGTTOU = C.SIGTTOU

SIGTTOU const #

const SIGTTOU = C.SIGTTOU

SIGTTOU const #

const SIGTTOU = C.SIGTTOU

SIGTTOU const #

const SIGTTOU = C.SIGTTOU

SIGTTOU const #

const SIGTTOU = C.SIGTTOU

SIGTTOU const #

const SIGTTOU = C.SIGTTOU

SIGTTOU const #

const SIGTTOU = C.SIGTTOU

SIGTTOU const #

const SIGTTOU = C.SIGTTOU

SIGURG const #

const SIGURG = C.SIGURG

SIGURG const #

const SIGURG = C.SIGURG

SIGURG const #

const SIGURG = C.SIGURG

SIGURG const #

const SIGURG = C.SIGURG

SIGURG const #

const SIGURG = C.SIGURG

SIGURG const #

const SIGURG = C.SIGURG

SIGURG const #

const SIGURG = C.SIGURG

SIGURG const #

const SIGURG = C.SIGURG

SIGURG const #

const SIGURG = C.SIGURG

SIGUSR1 const #

const SIGUSR1 = C.SIGUSR1

SIGUSR1 const #

const SIGUSR1 = C.SIGUSR1

SIGUSR1 const #

const SIGUSR1 = C.SIGUSR1

SIGUSR1 const #

const SIGUSR1 = C.SIGUSR1

SIGUSR1 const #

const SIGUSR1 = C.SIGUSR1

SIGUSR1 const #

const SIGUSR1 = C.SIGUSR1

SIGUSR1 const #

const SIGUSR1 = C.SIGUSR1

SIGUSR1 const #

const SIGUSR1 = C.SIGUSR1

SIGUSR1 const #

const SIGUSR1 = C.SIGUSR1

SIGUSR2 const #

const SIGUSR2 = C.SIGUSR2

SIGUSR2 const #

const SIGUSR2 = C.SIGUSR2

SIGUSR2 const #

const SIGUSR2 = C.SIGUSR2

SIGUSR2 const #

const SIGUSR2 = C.SIGUSR2

SIGUSR2 const #

const SIGUSR2 = C.SIGUSR2

SIGUSR2 const #

const SIGUSR2 = C.SIGUSR2

SIGUSR2 const #

const SIGUSR2 = C.SIGUSR2

SIGUSR2 const #

const SIGUSR2 = C.SIGUSR2

SIGUSR2 const #

const SIGUSR2 = C.SIGUSR2

SIGVTALRM const #

const SIGVTALRM = C.SIGVTALRM

SIGVTALRM const #

const SIGVTALRM = C.SIGVTALRM

SIGVTALRM const #

const SIGVTALRM = C.SIGVTALRM

SIGVTALRM const #

const SIGVTALRM = C.SIGVTALRM

SIGVTALRM const #

const SIGVTALRM = C.SIGVTALRM

SIGVTALRM const #

const SIGVTALRM = C.SIGVTALRM

SIGVTALRM const #

const SIGVTALRM = C.SIGVTALRM

SIGVTALRM const #

const SIGVTALRM = C.SIGVTALRM

SIGVTALRM const #

const SIGVTALRM = C.SIGVTALRM

SIGWINCH const #

const SIGWINCH = C.SIGWINCH

SIGWINCH const #

const SIGWINCH = C.SIGWINCH

SIGWINCH const #

const SIGWINCH = C.SIGWINCH

SIGWINCH const #

const SIGWINCH = C.SIGWINCH

SIGWINCH const #

const SIGWINCH = C.SIGWINCH

SIGWINCH const #

const SIGWINCH = C.SIGWINCH

SIGWINCH const #

const SIGWINCH = C.SIGWINCH

SIGWINCH const #

const SIGWINCH = C.SIGWINCH

SIGWINCH const #

const SIGWINCH = C.SIGWINCH

SIGXCPU const #

const SIGXCPU = C.SIGXCPU

SIGXCPU const #

const SIGXCPU = C.SIGXCPU

SIGXCPU const #

const SIGXCPU = C.SIGXCPU

SIGXCPU const #

const SIGXCPU = C.SIGXCPU

SIGXCPU const #

const SIGXCPU = C.SIGXCPU

SIGXCPU const #

const SIGXCPU = C.SIGXCPU

SIGXCPU const #

const SIGXCPU = C.SIGXCPU

SIGXCPU const #

const SIGXCPU = C.SIGXCPU

SIGXCPU const #

const SIGXCPU = C.SIGXCPU

SIGXFSZ const #

const SIGXFSZ = C.SIGXFSZ

SIGXFSZ const #

const SIGXFSZ = C.SIGXFSZ

SIGXFSZ const #

const SIGXFSZ = C.SIGXFSZ

SIGXFSZ const #

const SIGXFSZ = C.SIGXFSZ

SIGXFSZ const #

const SIGXFSZ = C.SIGXFSZ

SIGXFSZ const #

const SIGXFSZ = C.SIGXFSZ

SIGXFSZ const #

const SIGXFSZ = C.SIGXFSZ

SIGXFSZ const #

const SIGXFSZ = C.SIGXFSZ

SIGXFSZ const #

const SIGXFSZ = C.SIGXFSZ

SI_KERNEL const #

const SI_KERNEL = C.SI_KERNEL

SI_TIMER const #

const SI_TIMER = C.SI_TIMER

UMTX_OP_WAIT_UINT const #

const UMTX_OP_WAIT_UINT = C.UMTX_OP_WAIT_UINT

UMTX_OP_WAIT_UINT_PRIVATE const #

const UMTX_OP_WAIT_UINT_PRIVATE = C.UMTX_OP_WAIT_UINT_PRIVATE

UMTX_OP_WAKE const #

const UMTX_OP_WAKE = C.UMTX_OP_WAKE

UMTX_OP_WAKE_PRIVATE const #

const UMTX_OP_WAKE_PRIVATE = C.UMTX_OP_WAKE_PRIVATE

VM_REGION_BASIC_INFO_64 const #

const VM_REGION_BASIC_INFO_64 = C.VM_REGION_BASIC_INFO_64

VM_REGION_BASIC_INFO_COUNT_64 const #

const VM_REGION_BASIC_INFO_COUNT_64 = C.VM_REGION_BASIC_INFO_COUNT_64

_ var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _ stdFunction

_ const #

const _ selectDir = iota

_ var #

These are from non-kernel32.dll, so we prefer to LoadLibraryEx them.

var _ stdFunction

_ const #

const _

_ const #

Experimental events.

const _ traceEv = *ast.BinaryExpr

_64bit const #

_64bit = 1 on 64-bit systems, 0 on 32-bit systems

const _64bit = 1 << (^uintptr(0) >> 63) / 2

_AF_UNIX const #

Constants

const _AF_UNIX = 0x1

_AF_UNIX const #

const _AF_UNIX = 0x1

_AF_UNIX const #

const _AF_UNIX = 0x1

_AF_UNIX const #

const _AF_UNIX = 0x1

_AT_HWCAP const #

const _AT_HWCAP = 16

_AT_HWCAP const #

const _AT_HWCAP = 25

_AT_HWCAP2 const #

const _AT_HWCAP2 = 26

_AT_HWCAP2 const #

const _AT_HWCAP2 = 26

_AT_NULL const #

const _AT_NULL = 0

_AT_NULL const #

const _AT_NULL = 0

_AT_NULL const #

const _AT_NULL = 0

_AT_NULL const #

const _AT_NULL = 0

_AT_NULL const #

const _AT_NULL = 0

_AT_PAGESZ const #

const _AT_PAGESZ = 6

_AT_PAGESZ const #

const _AT_PAGESZ = 6

_AT_PAGESZ const #

const _AT_PAGESZ = 6

_AT_PAGESZ const #

const _AT_PAGESZ = 6

_AT_PAGESZ const #

const _AT_PAGESZ = 6

_AT_PLATFORM const #

const _AT_PLATFORM = 15

_AT_PLATFORM const #

const _AT_PLATFORM = 15

_AT_RANDOM const #

const _AT_RANDOM = 25

_AT_SECURE const #

const _AT_SECURE = 23

_AT_SUN_EXECNAME const #

const _AT_SUN_EXECNAME = 2014

_AT_SYSINFO_EHDR const #

const _AT_SYSINFO_EHDR = 33

_AT_TIMEKEEP const #

const _AT_TIMEKEEP = 22

_AddVectoredContinueHandler var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _AddVectoredContinueHandler stdFunction

_AddVectoredExceptionHandler var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _AddVectoredExceptionHandler stdFunction

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

Constants

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = C.BUS_ADRALN

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRALN const #

const _BUS_ADRALN = 0x1

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = C.BUS_ADRERR

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

Constants

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_ADRERR const #

const _BUS_ADRERR = 0x2

_BUS_OBJERR const #

const _BUS_OBJERR = C.BUS_OBJERR

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

Constants

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_BUS_OBJERR const #

const _BUS_OBJERR = 0x3

_CLOCK_MONOTONIC const #

const _CLOCK_MONOTONIC = 0x4

_CLOCK_MONOTONIC const #

const _CLOCK_MONOTONIC = 0x4

_CLOCK_MONOTONIC const #

const _CLOCK_MONOTONIC = 0x4

_CLOCK_MONOTONIC const #

const _CLOCK_MONOTONIC = 0x4

_CLOCK_MONOTONIC const #

const _CLOCK_MONOTONIC = 10

_CLOCK_MONOTONIC const #

const _CLOCK_MONOTONIC = 0x4

_CLOCK_MONOTONIC const #

const _CLOCK_MONOTONIC = 4

_CLOCK_MONOTONIC const #

const _CLOCK_MONOTONIC = 3

_CLOCK_MONOTONIC const #

const _CLOCK_MONOTONIC = 3

_CLOCK_PROF const #

const _CLOCK_PROF = 2

_CLOCK_PROF const #

const _CLOCK_PROF = 2

_CLOCK_REALTIME const #

const _CLOCK_REALTIME = 0x0

_CLOCK_REALTIME const #

From OpenBSD's sys/time.h

const _CLOCK_REALTIME = 0

_CLOCK_REALTIME const #

From NetBSD's &lt;sys/time.h&gt;

const _CLOCK_REALTIME = 0

_CLOCK_REALTIME const #

const _CLOCK_REALTIME = 3

_CLOCK_REALTIME const #

const _CLOCK_REALTIME = 0x0

_CLOCK_REALTIME const #

const _CLOCK_REALTIME = 0x0

_CLOCK_REALTIME const #

const _CLOCK_REALTIME = 0x0

_CLOCK_REALTIME const #

const _CLOCK_REALTIME = 9

_CLOCK_REALTIME const #

const _CLOCK_REALTIME = 0x0

_CLOCK_THREAD_CPUTIME_ID const #

const _CLOCK_THREAD_CPUTIME_ID = 0x3

_CLOCK_THREAD_CPUTIME_ID const #

const _CLOCK_THREAD_CPUTIME_ID = 0x3

_CLOCK_THREAD_CPUTIME_ID const #

const _CLOCK_THREAD_CPUTIME_ID = 0x3

_CLOCK_THREAD_CPUTIME_ID const #

const _CLOCK_THREAD_CPUTIME_ID = 0x3

_CLOCK_THREAD_CPUTIME_ID const #

const _CLOCK_THREAD_CPUTIME_ID = 0x3

_CLOCK_THREAD_CPUTIME_ID const #

Constants

const _CLOCK_THREAD_CPUTIME_ID = 0x3

_CLOCK_THREAD_CPUTIME_ID const #

const _CLOCK_THREAD_CPUTIME_ID = 0x3

_CLOCK_THREAD_CPUTIME_ID const #

const _CLOCK_THREAD_CPUTIME_ID = 0x3

_CLOCK_THREAD_CPUTIME_ID const #

const _CLOCK_THREAD_CPUTIME_ID = 0x3

_CLOCK_THREAD_CPUTIME_ID const #

const _CLOCK_THREAD_CPUTIME_ID = 0x3

_CLOCK_THREAD_CPUTIME_ID const #

const _CLOCK_THREAD_CPUTIME_ID = 0x3

_CLOCK_VIRTUAL const #

const _CLOCK_VIRTUAL = 1

_CLOCK_VIRTUAL const #

const _CLOCK_VIRTUAL = 1

_CLONE_CHILD_CLEARTID const #

Clone, the Linux rfork.

const _CLONE_CHILD_CLEARTID = 0x200000

_CLONE_CHILD_SETTID const #

Clone, the Linux rfork.

const _CLONE_CHILD_SETTID = 0x1000000

_CLONE_FILES const #

Clone, the Linux rfork.

const _CLONE_FILES = 0x400

_CLONE_FS const #

Clone, the Linux rfork.

const _CLONE_FS = 0x200

_CLONE_NEWIPC const #

Clone, the Linux rfork.

const _CLONE_NEWIPC = 0x8000000

_CLONE_NEWNS const #

Clone, the Linux rfork.

const _CLONE_NEWNS = 0x20000

_CLONE_NEWUTS const #

Clone, the Linux rfork.

const _CLONE_NEWUTS = 0x4000000

_CLONE_PARENT const #

Clone, the Linux rfork.

const _CLONE_PARENT = 0x8000

_CLONE_PARENT_SETTID const #

Clone, the Linux rfork.

const _CLONE_PARENT_SETTID = 0x100000

_CLONE_PTRACE const #

Clone, the Linux rfork.

const _CLONE_PTRACE = 0x2000

_CLONE_SETTLS const #

Clone, the Linux rfork.

const _CLONE_SETTLS = 0x80000

_CLONE_SIGHAND const #

Clone, the Linux rfork.

const _CLONE_SIGHAND = 0x800

_CLONE_STOPPED const #

Clone, the Linux rfork.

const _CLONE_STOPPED = 0x2000000

_CLONE_SYSVSEM const #

Clone, the Linux rfork.

const _CLONE_SYSVSEM = 0x40000

_CLONE_THREAD const #

Clone, the Linux rfork.

const _CLONE_THREAD = 0x10000

_CLONE_UNTRACED const #

Clone, the Linux rfork.

const _CLONE_UNTRACED = 0x800000

_CLONE_VFORK const #

Clone, the Linux rfork.

const _CLONE_VFORK = 0x4000

_CLONE_VM const #

Clone, the Linux rfork.

const _CLONE_VM = 0x100

_CONTEXT_CONTROL const #

NOTE(rsc): _CONTEXT_CONTROL is actually 0x400001 and should include PC, SP, and LR. However, empirically, LR doesn't come along on Windows 10 unless you also set _CONTEXT_INTEGER (0x400002). Without LR, we skip over the next-to-bottom function in profiles when the bottom function is frameless. So we set both here, to make a working _CONTEXT_CONTROL.

const _CONTEXT_CONTROL = 0x400003

_CONTEXT_CONTROL const #

const _CONTEXT_CONTROL = 0x100001

_CONTEXT_CONTROL const #

const _CONTEXT_CONTROL = 0x10001

_CONTEXT_CONTROL const #

NOTE(rsc): _CONTEXT_CONTROL is actually 0x200001 and should include PC, SP, and LR. However, empirically, LR doesn't come along on Windows 10 unless you also set _CONTEXT_INTEGER (0x200002). Without LR, we skip over the next-to-bottom function in profiles when the bottom function is frameless. So we set both here, to make a working _CONTEXT_CONTROL.

const _CONTEXT_CONTROL = 0x200003

_CPU_CURRENT_PID const #

const _CPU_CURRENT_PID = -1

_CPU_LEVEL_WHICH const #

const _CPU_LEVEL_WHICH = 0x3

_CPU_LEVEL_WHICH const #

const _CPU_LEVEL_WHICH = 0x3

_CPU_LEVEL_WHICH const #

Local consts.

const _CPU_LEVEL_WHICH = C.CPU_LEVEL_WHICH

_CPU_LEVEL_WHICH const #

const _CPU_LEVEL_WHICH = 0x3

_CPU_LEVEL_WHICH const #

const _CPU_LEVEL_WHICH = 0x3

_CPU_LEVEL_WHICH const #

const _CPU_LEVEL_WHICH = 0x3

_CPU_WHICH_PID const #

const _CPU_WHICH_PID = 0x2

_CPU_WHICH_PID const #

const _CPU_WHICH_PID = 0x2

_CPU_WHICH_PID const #

const _CPU_WHICH_PID = 0x2

_CPU_WHICH_PID const #

const _CPU_WHICH_PID = 0x2

_CPU_WHICH_PID const #

const _CPU_WHICH_PID = 0x2

_CPU_WHICH_PID const #

Local consts.

const _CPU_WHICH_PID = C.CPU_WHICH_PID

_CTL_HW const #

From DragonFly's &lt;sys/sysctl.h&gt;

const _CTL_HW = 6

_CTL_HW const #

const _CTL_HW = 6

_CTL_HW const #

From OpenBSD's &lt;sys/sysctl.h&gt;

const _CTL_HW = 6

_CTL_HW const #

From NetBSD's &lt;sys/sysctl.h&gt;

const _CTL_HW = 6

_CTL_HW const #

From FreeBSD's &lt;sys/sysctl.h&gt;

const _CTL_HW = 6

_CTL_KERN const #

From NetBSD's &lt;sys/sysctl.h&gt;

const _CTL_KERN = 1

_CTL_MAXNAME const #

const _CTL_MAXNAME = 0x18

_CTL_MAXNAME const #

const _CTL_MAXNAME = 0x18

_CTL_MAXNAME const #

const _CTL_MAXNAME = 0x18

_CTL_MAXNAME const #

const _CTL_MAXNAME = 0x18

_CTL_MAXNAME const #

const _CTL_MAXNAME = 0x18

_CTL_MAXNAME const #

Local consts.

const _CTL_MAXNAME = C.CTL_MAXNAME

_CTL_QUERY const #

Undocumented numbers from FreeBSD's lib/libc/gen/sysctlnametomib.c.

const _CTL_QUERY = 0

_CTL_QUERY_MIB const #

Undocumented numbers from FreeBSD's lib/libc/gen/sysctlnametomib.c.

const _CTL_QUERY_MIB = 3

_CTRL_BREAK_EVENT const #

const _CTRL_BREAK_EVENT = 0x1

_CTRL_CLOSE_EVENT const #

const _CTRL_CLOSE_EVENT = 0x2

_CTRL_C_EVENT const #

const _CTRL_C_EVENT = 0x0

_CTRL_LOGOFF_EVENT const #

const _CTRL_LOGOFF_EVENT = 0x5

_CTRL_SHUTDOWN_EVENT const #

const _CTRL_SHUTDOWN_EVENT = 0x6

_CloseHandle var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _CloseHandle stdFunction

_CreateEventA var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _CreateEventA stdFunction

_CreateIoCompletionPort var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _CreateIoCompletionPort stdFunction

_CreateThread var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _CreateThread stdFunction

_CreateWaitableTimerA var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _CreateWaitableTimerA stdFunction

_CreateWaitableTimerExW var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _CreateWaitableTimerExW stdFunction

_DT_GNU_HASH const #

const _DT_GNU_HASH = 0x6ffffef5

_DT_HASH const #

const _DT_HASH = 4

_DT_NULL const #

const _DT_NULL = 0

_DT_STRTAB const #

const _DT_STRTAB = 5

_DT_SYMTAB const #

const _DT_SYMTAB = 6

_DT_VERDEF const #

const _DT_VERDEF = 0x6ffffffc

_DT_VERSYM const #

const _DT_VERSYM = 0x6ffffff0

_DUPLICATE_SAME_ACCESS const #

const _DUPLICATE_SAME_ACCESS = 0x2

_DWORD_MAX const #

const _DWORD_MAX = 0xffffffff

_DebugGC const #

const _DebugGC = 0

_DuplicateHandle var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _DuplicateHandle stdFunction

_EACCES const #

const _EACCES = C.EACCES

_EACCES const #

const _EACCES = 13

_EACCES const #

const _EACCES = 0xd

_EAGAIN const #

const _EAGAIN = 0x23

_EAGAIN const #

const _EAGAIN = 0xb

_EAGAIN const #

const _EAGAIN = 0xb

_EAGAIN const #

const _EAGAIN = 0x23

_EAGAIN const #

const _EAGAIN = 0xb

_EAGAIN const #

const _EAGAIN = 0x23

_EAGAIN const #

const _EAGAIN = 0x23

_EAGAIN const #

const _EAGAIN = 0x23

_EAGAIN const #

const _EAGAIN = 0x23

_EAGAIN const #

const _EAGAIN = 0xb

_EAGAIN const #

const _EAGAIN = 0x23

_EAGAIN const #

const _EAGAIN = 0x23

_EAGAIN const #

const _EAGAIN = 0x23

_EAGAIN const #

const _EAGAIN = 0xb

_EAGAIN const #

const _EAGAIN = 0x23

_EAGAIN const #

const _EAGAIN = 0xb

_EAGAIN const #

const _EAGAIN = 0x23

_EAGAIN const #

const _EAGAIN = 0xb

_EAGAIN const #

const _EAGAIN = C.EAGAIN

_EAGAIN const #

const _EAGAIN = 0xb

_EAGAIN const #

const _EAGAIN = 0x23

_EAGAIN const #

const _EAGAIN = 0x23

_EAGAIN const #

const _EAGAIN = 0x23

_EAGAIN const #

const _EAGAIN = 0x23

_EAGAIN const #

const _EAGAIN = 0xb

_EAGAIN const #

const _EAGAIN = 0x23

_EAGAIN const #

const _EAGAIN = 0xb

_EAGAIN const #

const _EAGAIN = 0xb

_EAGAIN const #

Constants

const _EAGAIN = 0xb

_EAGAIN const #

const _EAGAIN = 0x23

_EAGAIN const #

const _EAGAIN = 0x23

_EAGAIN const #

const _EAGAIN = 0xb

_EAGAIN const #

const _EAGAIN = 0x23

_EBADF const #

const _EBADF = 0x9

_EBUSY const #

const _EBUSY = 0x10

_EBUSY const #

const _EBUSY = 0x10

_EFAULT const #

const _EFAULT = 0xe

_EFAULT const #

const _EFAULT = 0xe

_EFAULT const #

const _EFAULT = 0xe

_EFAULT const #

const _EFAULT = 0xe

_EFAULT const #

const _EFAULT = 0xe

_EFAULT const #

const _EFAULT = 0xe

_EFAULT const #

const _EFAULT = 0xe

_EFAULT const #

const _EFAULT = 0xe

_EFAULT const #

const _EFAULT = 0xe

_EFAULT const #

const _EFAULT = 0xe

_EFAULT const #

const _EFAULT = C.EFAULT

_EFAULT const #

const _EFAULT = 0xe

_EFAULT const #

const _EFAULT = 0xe

_EFAULT const #

const _EFAULT = 0xe

_EFAULT const #

const _EFAULT = 0xe

_EFAULT const #

const _EFAULT = 0xe

_EFAULT const #

const _EFAULT = 0xe

_EFAULT const #

const _EFAULT = 0xe

_EFAULT const #

const _EFAULT = 0xe

_EFAULT const #

const _EFAULT = 0xe

_EFAULT const #

const _EFAULT = 0xe

_EFAULT const #

const _EFAULT = 0xe

_EINPROGRESS const #

const _EINPROGRESS = 0x96

_EINTR const #

const _EINTR = 0x4

_EINTR const #

Constants

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 27

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = C.EINTR

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINTR const #

const _EINTR = 0x4

_EINVAL const #

const _EINVAL = C.EINVAL

_EINVAL const #

const _EINVAL = 0x16

_EINVAL const #

const _EINVAL = 22

_EI_NIDENT const #

const _EI_NIDENT = 16

_ENOENT const #

const _ENOENT = C.ENOENT

_ENOENT const #

const _ENOENT = 0x2

_ENOMEM const #

const _ENOMEM = 0xc

_ENOMEM const #

const _ENOMEM = 12

_ENOMEM const #

const _ENOMEM = 12

_ENOMEM const #

const _ENOMEM = 0xc

_ENOMEM const #

const _ENOMEM = 0xc

_ENOMEM const #

const _ENOMEM = 0xc

_ENOMEM const #

const _ENOMEM = 0xc

_ENOMEM const #

const _ENOMEM = C.ENOMEM

_ENOMEM const #

const _ENOMEM = 0xc

_ENOMEM const #

Constants

const _ENOMEM = 0xc

_ENOMEM const #

const _ENOMEM = 0xc

_ENOMEM const #

const _ENOMEM = 0xc

_ENOMEM const #

const _ENOMEM = 0xc

_ENOMEM const #

const _ENOMEM = 0xc

_ENOMEM const #

const _ENOMEM = 0xc

_ENOTSUP const #

const _ENOTSUP = 91

_EPERM const #

const _EPERM = 0x1

_EPERM const #

const _EPERM = C.EPERM

_ERRMAX const #

const _ERRMAX = 128

_ERROR_COMMITMENT_LIMIT const #

const _ERROR_COMMITMENT_LIMIT = 1455

_ERROR_NOT_ENOUGH_MEMORY const #

const _ERROR_NOT_ENOUGH_MEMORY = 8

_ESRCH const #

const _ESRCH = 3

_ESRCH const #

const _ESRCH = 3

_ETIME const #

const _ETIME = 0x3e

_ETIMEDOUT const #

const _ETIMEDOUT = C.ETIMEDOUT

_ETIMEDOUT const #

const _ETIMEDOUT = 60

_ETIMEDOUT const #

const _ETIMEDOUT = 0x3c

_ETIMEDOUT const #

const _ETIMEDOUT = 0x3c

_ETIMEDOUT const #

const _ETIMEDOUT = 0x3c

_ETIMEDOUT const #

const _ETIMEDOUT = 0x3c

_ETIMEDOUT const #

const _ETIMEDOUT = 0x3c

_ETIMEDOUT const #

const _ETIMEDOUT = 0x3c

_ETIMEDOUT const #

const _ETIMEDOUT = 0x3c

_ETIMEDOUT const #

const _ETIMEDOUT = 0x3c

_ETIMEDOUT const #

const _ETIMEDOUT = 0x3c

_ETIMEDOUT const #

const _ETIMEDOUT = 0x3c

_ETIMEDOUT const #

const _ETIMEDOUT = 0x3c

_ETIMEDOUT const #

const _ETIMEDOUT = 0x91

_ETIMEDOUT const #

const _ETIMEDOUT = 0x4e

_ETIMEDOUT const #

const _ETIMEDOUT = 0x3c

_ETIMEDOUT const #

const _ETIMEDOUT = 0x3c

_ETIMEDOUT const #

const _ETIMEDOUT = 0x3c

_ETIMEDOUT const #

const _ETIMEDOUT = 0x3c

_EVFILT_READ const #

const _EVFILT_READ = -0x1

_EVFILT_READ const #

const _EVFILT_READ = -0x1

_EVFILT_READ const #

const _EVFILT_READ = -0x1

_EVFILT_READ const #

const _EVFILT_READ = -0x1

_EVFILT_READ const #

const _EVFILT_READ = -0x1

_EVFILT_READ const #

const _EVFILT_READ = -0x1

_EVFILT_READ const #

const _EVFILT_READ = -0x1

_EVFILT_READ const #

const _EVFILT_READ = -0x1

_EVFILT_READ const #

const _EVFILT_READ = 0x0

_EVFILT_READ const #

const _EVFILT_READ = -0x1

_EVFILT_READ const #

const _EVFILT_READ = 0x0

_EVFILT_READ const #

const _EVFILT_READ = -0x1

_EVFILT_READ const #

const _EVFILT_READ = -0x1

_EVFILT_READ const #

const _EVFILT_READ = -0x1

_EVFILT_READ const #

const _EVFILT_READ = -0x1

_EVFILT_READ const #

const _EVFILT_READ = -0x1

_EVFILT_READ const #

const _EVFILT_READ = -0x1

_EVFILT_READ const #

const _EVFILT_READ = 0x0

_EVFILT_READ const #

const _EVFILT_READ = 0x0

_EVFILT_USER const #

const _EVFILT_USER = 0x8

_EVFILT_USER const #

const _EVFILT_USER = *ast.UnaryExpr

_EVFILT_USER const #

const _EVFILT_USER = 0x8

_EVFILT_USER const #

const _EVFILT_USER = *ast.UnaryExpr

_EVFILT_USER const #

const _EVFILT_USER = *ast.UnaryExpr

_EVFILT_USER const #

const _EVFILT_USER = *ast.UnaryExpr

_EVFILT_USER const #

const _EVFILT_USER = *ast.UnaryExpr

_EVFILT_USER const #

const _EVFILT_USER = 0x8

_EVFILT_USER const #

const _EVFILT_USER = *ast.UnaryExpr

_EVFILT_USER const #

const _EVFILT_USER = 0x8

_EVFILT_USER const #

const _EVFILT_USER = *ast.UnaryExpr

_EVFILT_USER const #

const _EVFILT_USER = *ast.UnaryExpr

_EVFILT_WRITE const #

const _EVFILT_WRITE = 0x1

_EVFILT_WRITE const #

const _EVFILT_WRITE = -0x2

_EVFILT_WRITE const #

const _EVFILT_WRITE = -0x2

_EVFILT_WRITE const #

const _EVFILT_WRITE = -0x2

_EVFILT_WRITE const #

const _EVFILT_WRITE = -0x2

_EVFILT_WRITE const #

const _EVFILT_WRITE = 0x1

_EVFILT_WRITE const #

const _EVFILT_WRITE = -0x2

_EVFILT_WRITE const #

const _EVFILT_WRITE = 0x1

_EVFILT_WRITE const #

const _EVFILT_WRITE = -0x2

_EVFILT_WRITE const #

const _EVFILT_WRITE = -0x2

_EVFILT_WRITE const #

const _EVFILT_WRITE = -0x2

_EVFILT_WRITE const #

const _EVFILT_WRITE = -0x2

_EVFILT_WRITE const #

const _EVFILT_WRITE = -0x2

_EVFILT_WRITE const #

const _EVFILT_WRITE = -0x2

_EVFILT_WRITE const #

const _EVFILT_WRITE = -0x2

_EVFILT_WRITE const #

const _EVFILT_WRITE = -0x2

_EVFILT_WRITE const #

const _EVFILT_WRITE = -0x2

_EVFILT_WRITE const #

const _EVFILT_WRITE = -0x2

_EVFILT_WRITE const #

const _EVFILT_WRITE = 0x1

_EV_ADD const #

const _EV_ADD = 0x1

_EV_ADD const #

const _EV_ADD = 0x1

_EV_ADD const #

const _EV_ADD = 0x1

_EV_ADD const #

const _EV_ADD = 0x1

_EV_ADD const #

const _EV_ADD = 0x1

_EV_ADD const #

const _EV_ADD = 0x1

_EV_ADD const #

const _EV_ADD = 0x1

_EV_ADD const #

const _EV_ADD = 0x1

_EV_ADD const #

const _EV_ADD = 0x1

_EV_ADD const #

const _EV_ADD = 0x1

_EV_ADD const #

const _EV_ADD = 0x1

_EV_ADD const #

const _EV_ADD = 0x1

_EV_ADD const #

const _EV_ADD = 0x1

_EV_ADD const #

const _EV_ADD = 0x1

_EV_ADD const #

const _EV_ADD = 0x1

_EV_ADD const #

const _EV_ADD = 0x1

_EV_ADD const #

const _EV_ADD = 0x1

_EV_ADD const #

const _EV_ADD = 0x1

_EV_ADD const #

const _EV_ADD = 0x1

_EV_CLEAR const #

const _EV_CLEAR = 0x20

_EV_CLEAR const #

const _EV_CLEAR = 0x20

_EV_CLEAR const #

const _EV_CLEAR = 0x20

_EV_CLEAR const #

const _EV_CLEAR = 0x20

_EV_CLEAR const #

const _EV_CLEAR = 0x20

_EV_CLEAR const #

const _EV_CLEAR = 0x20

_EV_CLEAR const #

const _EV_CLEAR = 0x20

_EV_CLEAR const #

const _EV_CLEAR = 0x20

_EV_CLEAR const #

const _EV_CLEAR = 0x20

_EV_CLEAR const #

const _EV_CLEAR = 0x20

_EV_CLEAR const #

const _EV_CLEAR = 0x20

_EV_CLEAR const #

const _EV_CLEAR = 0x20

_EV_CLEAR const #

const _EV_CLEAR = 0x20

_EV_CLEAR const #

const _EV_CLEAR = 0x20

_EV_CLEAR const #

const _EV_CLEAR = 0x20

_EV_CLEAR const #

const _EV_CLEAR = 0x20

_EV_CLEAR const #

const _EV_CLEAR = 0x20

_EV_CLEAR const #

const _EV_CLEAR = 0x20

_EV_CLEAR const #

const _EV_CLEAR = 0x20

_EV_DELETE const #

const _EV_DELETE = 0x2

_EV_DELETE const #

const _EV_DELETE = 0x2

_EV_DELETE const #

const _EV_DELETE = 0x2

_EV_DELETE const #

const _EV_DELETE = 0x2

_EV_DELETE const #

const _EV_DELETE = 0x2

_EV_DELETE const #

const _EV_DELETE = 0x2

_EV_DELETE const #

const _EV_DELETE = 0x2

_EV_DELETE const #

const _EV_DELETE = 0x2

_EV_DELETE const #

const _EV_DELETE = 0x2

_EV_DELETE const #

const _EV_DELETE = 0x2

_EV_DELETE const #

const _EV_DELETE = 0x2

_EV_DELETE const #

const _EV_DELETE = 0x2

_EV_DELETE const #

const _EV_DELETE = 0x2

_EV_DELETE const #

const _EV_DELETE = 0x2

_EV_DELETE const #

const _EV_DELETE = 0x2

_EV_DELETE const #

const _EV_DELETE = 0x2

_EV_DELETE const #

const _EV_DELETE = 0x2

_EV_DELETE const #

const _EV_DELETE = 0x2

_EV_DELETE const #

const _EV_DELETE = 0x2

_EV_DISABLE const #

const _EV_DISABLE = 0x8

_EV_DISABLE const #

const _EV_DISABLE = 0x8

_EV_DISABLE const #

const _EV_DISABLE = 0x8

_EV_DISABLE const #

const _EV_DISABLE = 0x8

_EV_DISABLE const #

const _EV_DISABLE = 0x8

_EV_DISABLE const #

const _EV_DISABLE = 0x8

_EV_DISABLE const #

const _EV_DISABLE = 0x8

_EV_DISABLE const #

const _EV_DISABLE = 0x8

_EV_DISABLE const #

const _EV_DISABLE = 0x8

_EV_DISABLE const #

const _EV_DISABLE = 0x8

_EV_DISABLE const #

const _EV_DISABLE = 0x8

_EV_DISABLE const #

const _EV_DISABLE = 0x8

_EV_ENABLE const #

const _EV_ENABLE = 0x4

_EV_ENABLE const #

const _EV_ENABLE = 0x4

_EV_ENABLE const #

const _EV_ENABLE = 0x4

_EV_ENABLE const #

const _EV_ENABLE = 0x4

_EV_ENABLE const #

const _EV_ENABLE = 0x4

_EV_ENABLE const #

const _EV_ENABLE = 0x4

_EV_ENABLE const #

const _EV_ENABLE = 0x4

_EV_ENABLE const #

const _EV_ENABLE = 0x4

_EV_ENABLE const #

const _EV_ENABLE = 0x4

_EV_ENABLE const #

const _EV_ENABLE = 0x4

_EV_ENABLE const #

const _EV_ENABLE = 0x4

_EV_ENABLE const #

const _EV_ENABLE = 0x4

_EV_EOF const #

const _EV_EOF = 0x8000

_EV_EOF const #

const _EV_EOF = 0x8000

_EV_EOF const #

const _EV_EOF = 0x8000

_EV_EOF const #

const _EV_EOF = 0x8000

_EV_EOF const #

const _EV_EOF = 0x8000

_EV_EOF const #

const _EV_EOF = 0x8000

_EV_EOF const #

const _EV_EOF = 0x8000

_EV_EOF const #

const _EV_EOF = 0x8000

_EV_EOF const #

const _EV_EOF = 0x8000

_EV_EOF const #

const _EV_EOF = 0x8000

_EV_EOF const #

const _EV_EOF = 0x8000

_EV_EOF const #

const _EV_EOF = 0x8000

_EV_EOF const #

const _EV_EOF = 0x8000

_EV_EOF const #

const _EV_EOF = 0x8000

_EV_EOF const #

const _EV_EOF = 0x8000

_EV_EOF const #

const _EV_EOF = 0x8000

_EV_EOF const #

const _EV_EOF = 0x8000

_EV_EOF const #

const _EV_EOF = 0x8000

_EV_EOF const #

const _EV_EOF = 0x8000

_EV_ERROR const #

const _EV_ERROR = 0x4000

_EV_ERROR const #

const _EV_ERROR = 0x4000

_EV_ERROR const #

const _EV_ERROR = 0x4000

_EV_ERROR const #

const _EV_ERROR = 0x4000

_EV_ERROR const #

const _EV_ERROR = 0x4000

_EV_ERROR const #

const _EV_ERROR = 0x4000

_EV_ERROR const #

const _EV_ERROR = 0x4000

_EV_ERROR const #

const _EV_ERROR = 0x4000

_EV_ERROR const #

const _EV_ERROR = 0x4000

_EV_ERROR const #

const _EV_ERROR = 0x4000

_EV_ERROR const #

const _EV_ERROR = 0x4000

_EV_ERROR const #

const _EV_ERROR = 0x4000

_EV_ERROR const #

const _EV_ERROR = 0x4000

_EV_ERROR const #

const _EV_ERROR = 0x4000

_EV_ERROR const #

const _EV_ERROR = 0x4000

_EV_ERROR const #

const _EV_ERROR = 0x4000

_EV_ERROR const #

const _EV_ERROR = 0x4000

_EV_ERROR const #

const _EV_ERROR = 0x4000

_EV_ERROR const #

const _EV_ERROR = 0x4000

_EV_RECEIPT const #

const _EV_RECEIPT = 0

_EV_RECEIPT const #

const _EV_RECEIPT = 0

_EV_RECEIPT const #

const _EV_RECEIPT = 0x40

_EV_RECEIPT const #

const _EV_RECEIPT = 0x40

_EV_RECEIPT const #

const _EV_RECEIPT = 0

_EV_RECEIPT const #

const _EV_RECEIPT = 0

_EV_RECEIPT const #

const _EV_RECEIPT = 0x40

_EV_RECEIPT const #

const _EV_RECEIPT = 0x40

_EV_RECEIPT const #

const _EV_RECEIPT = 0x40

_EV_RECEIPT const #

const _EV_RECEIPT = 0x40

_EV_RECEIPT const #

const _EV_RECEIPT = 0x40

_EWOULDBLOCK const #

const _EWOULDBLOCK = _EAGAIN

_EWOULDBLOCK const #

const _EWOULDBLOCK = 0xb

_EXCEPTION_ACCESS_VIOLATION const #

const _EXCEPTION_ACCESS_VIOLATION = 0xc0000005

_EXCEPTION_BREAKPOINT const #

const _EXCEPTION_BREAKPOINT = 0x80000003

_EXCEPTION_CONTINUE_EXECUTION const #

const _EXCEPTION_CONTINUE_EXECUTION = -0x1

_EXCEPTION_CONTINUE_SEARCH_SEH const #

const _EXCEPTION_CONTINUE_SEARCH_SEH = 0x1

_EXCEPTION_FLT_DENORMAL_OPERAND const #

const _EXCEPTION_FLT_DENORMAL_OPERAND = 0xc000008d

_EXCEPTION_FLT_DIVIDE_BY_ZERO const #

const _EXCEPTION_FLT_DIVIDE_BY_ZERO = 0xc000008e

_EXCEPTION_FLT_INEXACT_RESULT const #

const _EXCEPTION_FLT_INEXACT_RESULT = 0xc000008f

_EXCEPTION_FLT_OVERFLOW const #

const _EXCEPTION_FLT_OVERFLOW = 0xc0000091

_EXCEPTION_FLT_UNDERFLOW const #

const _EXCEPTION_FLT_UNDERFLOW = 0xc0000093

_EXCEPTION_ILLEGAL_INSTRUCTION const #

const _EXCEPTION_ILLEGAL_INSTRUCTION = 0xc000001d

_EXCEPTION_INT_DIVIDE_BY_ZERO const #

const _EXCEPTION_INT_DIVIDE_BY_ZERO = 0xc0000094

_EXCEPTION_INT_OVERFLOW const #

const _EXCEPTION_INT_OVERFLOW = 0xc0000095

_EXCEPTION_IN_PAGE_ERROR const #

const _EXCEPTION_IN_PAGE_ERROR = 0xc0000006

_ExitProcess var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _ExitProcess stdFunction

_FD_CLOEXEC const #

const _FD_CLOEXEC = 1

_FORK_NOSIGCHLD const #

const _FORK_NOSIGCHLD = 0x1

_FORK_WAITPID const #

const _FORK_WAITPID = 0x2

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x16

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

Constants

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x1

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x1

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

const _FPE_FLTDIV = C.FPE_FLTDIV

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTDIV const #

const _FPE_FLTDIV = 0x3

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

const _FPE_FLTINV = C.FPE_FLTINV

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

Constants

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

const _FPE_FLTINV = 0x5

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

const _FPE_FLTINV = 0x5

_FPE_FLTINV const #

const _FPE_FLTINV = 0x1a

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTINV const #

const _FPE_FLTINV = 0x7

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x17

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x2

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

Constants

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

const _FPE_FLTOVF = C.FPE_FLTOVF

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x2

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTOVF const #

const _FPE_FLTOVF = 0x4

_FPE_FLTRES const #

const _FPE_FLTRES = 0x4

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

Constants

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

const _FPE_FLTRES = C.FPE_FLTRES

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

const _FPE_FLTRES = 0x19

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

const _FPE_FLTRES = 0x4

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTRES const #

const _FPE_FLTRES = 0x6

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x6

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x1b

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x6

_FPE_FLTSUB const #

const _FPE_FLTSUB = C.FPE_FLTSUB

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

Constants

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTSUB const #

const _FPE_FLTSUB = 0x8

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

Constants

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

const _FPE_FLTUND = 0x3

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

const _FPE_FLTUND = 0x3

_FPE_FLTUND const #

const _FPE_FLTUND = 0x18

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_FLTUND const #

const _FPE_FLTUND = C.FPE_FLTUND

_FPE_FLTUND const #

const _FPE_FLTUND = 0x5

_FPE_INTDIV const #

const _FPE_INTDIV = 0x1

_FPE_INTDIV const #

const _FPE_INTDIV = 0x7

_FPE_INTDIV const #

const _FPE_INTDIV = 0x1

_FPE_INTDIV const #

const _FPE_INTDIV = 0x2

_FPE_INTDIV const #

const _FPE_INTDIV = 0x1

_FPE_INTDIV const #

const _FPE_INTDIV = 0x1

_FPE_INTDIV const #

const _FPE_INTDIV = 0x1

_FPE_INTDIV const #

const _FPE_INTDIV = 0x1

_FPE_INTDIV const #

const _FPE_INTDIV = 0x1

_FPE_INTDIV const #

const _FPE_INTDIV = 0x1

_FPE_INTDIV const #

const _FPE_INTDIV = 0x14

_FPE_INTDIV const #

Constants

const _FPE_INTDIV = 0x1

_FPE_INTDIV const #

const _FPE_INTDIV = 0x1

_FPE_INTDIV const #

const _FPE_INTDIV = 0x1

_FPE_INTDIV const #

const _FPE_INTDIV = 0x1

_FPE_INTDIV const #

const _FPE_INTDIV = 0x7

_FPE_INTDIV const #

const _FPE_INTDIV = 0x1

_FPE_INTDIV const #

const _FPE_INTDIV = 0x1

_FPE_INTDIV const #

const _FPE_INTDIV = 0x1

_FPE_INTDIV const #

const _FPE_INTDIV = 0x2

_FPE_INTDIV const #

const _FPE_INTDIV = 0x2

_FPE_INTDIV const #

const _FPE_INTDIV = 0x2

_FPE_INTDIV const #

const _FPE_INTDIV = 0x1

_FPE_INTDIV const #

const _FPE_INTDIV = 0x1

_FPE_INTDIV const #

const _FPE_INTDIV = 0x1

_FPE_INTDIV const #

const _FPE_INTDIV = C.FPE_INTDIV

_FPE_INTDIV const #

const _FPE_INTDIV = 0x1

_FPE_INTDIV const #

const _FPE_INTDIV = 0x1

_FPE_INTDIV const #

const _FPE_INTDIV = 0x2

_FPE_INTDIV const #

const _FPE_INTDIV = 0x1

_FPE_INTDIV const #

const _FPE_INTDIV = 0x1

_FPE_INTDIV const #

const _FPE_INTDIV = 0x1

_FPE_INTDIV const #

const _FPE_INTDIV = 0x2

_FPE_INTOVF const #

const _FPE_INTOVF = 0x1

_FPE_INTOVF const #

const _FPE_INTOVF = 0x2

_FPE_INTOVF const #

const _FPE_INTOVF = 0x8

_FPE_INTOVF const #

const _FPE_INTOVF = 0x2

_FPE_INTOVF const #

const _FPE_INTOVF = 0x2

_FPE_INTOVF const #

const _FPE_INTOVF = 0x1

_FPE_INTOVF const #

const _FPE_INTOVF = 0x2

_FPE_INTOVF const #

const _FPE_INTOVF = 0x2

_FPE_INTOVF const #

const _FPE_INTOVF = 0x2

_FPE_INTOVF const #

const _FPE_INTOVF = 0x1

_FPE_INTOVF const #

const _FPE_INTOVF = 0x2

_FPE_INTOVF const #

const _FPE_INTOVF = 0x2

_FPE_INTOVF const #

const _FPE_INTOVF = 0x2

_FPE_INTOVF const #

const _FPE_INTOVF = C.FPE_INTOVF

_FPE_INTOVF const #

const _FPE_INTOVF = 0x2

_FPE_INTOVF const #

const _FPE_INTOVF = 0x1

_FPE_INTOVF const #

const _FPE_INTOVF = 0x1

_FPE_INTOVF const #

const _FPE_INTOVF = 0x2

_FPE_INTOVF const #

const _FPE_INTOVF = 0x2

_FPE_INTOVF const #

const _FPE_INTOVF = 0x2

_FPE_INTOVF const #

const _FPE_INTOVF = 0x2

_FPE_INTOVF const #

const _FPE_INTOVF = 0x2

_FPE_INTOVF const #

const _FPE_INTOVF = 0x2

_FPE_INTOVF const #

const _FPE_INTOVF = 0x1

_FPE_INTOVF const #

const _FPE_INTOVF = 0x2

_FPE_INTOVF const #

const _FPE_INTOVF = 0x2

_FPE_INTOVF const #

Constants

const _FPE_INTOVF = 0x2

_FPE_INTOVF const #

const _FPE_INTOVF = 0x2

_FPE_INTOVF const #

const _FPE_INTOVF = 0x8

_FPE_INTOVF const #

const _FPE_INTOVF = 0x2

_FPE_INTOVF const #

const _FPE_INTOVF = 0x15

_FPE_INTOVF const #

const _FPE_INTOVF = 0x2

_FPE_INTOVF const #

const _FPE_INTOVF = 0x2

_FUTEX_PRIVATE_FLAG const #

const _FUTEX_PRIVATE_FLAG = 128

_FUTEX_WAIT_PRIVATE const #

const _FUTEX_WAIT_PRIVATE = 0 | _FUTEX_PRIVATE_FLAG

_FUTEX_WAKE_PRIVATE const #

const _FUTEX_WAKE_PRIVATE = 1 | _FUTEX_PRIVATE_FLAG

_F_DUP2FD const #

const _F_DUP2FD = 0x9

_F_GETFD const #

const _F_GETFD = 0x1

_F_GETFD const #

const _F_GETFD = C.F_GETFD

_F_GETFL const #

const _F_GETFL = 0x3

_F_GETFL const #

const _F_GETFL = C.F_GETFL

_F_GETFL const #

const _F_GETFL = 0x3

_F_GETFL const #

const _F_GETFL = 0x3

_F_SETFD const #

These values are the same on all known Unix systems. If we find a discrepancy some day, we can split them out.

const _F_SETFD = 2

_F_SETFL const #

const _F_SETFL = 0x4

_F_SETFL const #

const _F_SETFL = C.F_SETFL

_F_SETFL const #

const _F_SETFL = 0x4

_F_SETFL const #

const _F_SETFL = 0x4

_FinBlockSize const #

const _FinBlockSize = 4 * 1024

_FixAllocChunk const #

const _FixAllocChunk = 16 << 10

_FreeEnvironmentStringsW var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _FreeEnvironmentStringsW stdFunction

_GCmark const #

const _GCmark

_GCmarktermination const #

const _GCmarktermination

_GCoff const #

const _GCoff = iota

_Gcopystack const #

_Gcopystack means this goroutine's stack is being moved. It is not executing user code and is not on a run queue. The stack is owned by the goroutine that put it in _Gcopystack.

const _Gcopystack

_Gdead const #

_Gdead means this goroutine is currently unused. It may be just exited, on a free list, or just being initialized. It is not executing user code. It may or may not have a stack allocated. The G and its stack (if any) are owned by the M that is exiting the G or that obtained the G from the free list.

const _Gdead

_Genqueue_unused const #

_Genqueue_unused is currently unused.

const _Genqueue_unused

_GetConsoleMode var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _GetConsoleMode stdFunction

_GetCurrentThreadId var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _GetCurrentThreadId stdFunction

_GetEnvironmentStringsW var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _GetEnvironmentStringsW stdFunction

_GetErrorMode var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _GetErrorMode stdFunction

_GetProcAddress var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _GetProcAddress stdFunction

_GetProcessAffinityMask var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _GetProcessAffinityMask stdFunction

_GetQueuedCompletionStatusEx var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _GetQueuedCompletionStatusEx stdFunction

_GetStdHandle var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _GetStdHandle stdFunction

_GetSystemDirectoryA var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _GetSystemDirectoryA stdFunction

_GetSystemInfo var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _GetSystemInfo stdFunction

_GetThreadContext var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _GetThreadContext stdFunction

_Gidle const #

_Gidle means this goroutine was just allocated and has not yet been initialized.

const _Gidle = iota

_Gmoribund_unused const #

_Gmoribund_unused is currently unused, but hardcoded in gdb scripts.

const _Gmoribund_unused

_GoidCacheBatch const #

Number of goroutine ids to grab from sched.goidgen to local per-P cache at once. 16 seems to provide enough amortization, but other than that it's mostly arbitrary number.

const _GoidCacheBatch = 16

_Gpreempted const #

_Gpreempted means this goroutine stopped itself for a suspendG preemption. It is like _Gwaiting, but nothing is yet responsible for ready()ing it. Some suspendG must CAS the status to _Gwaiting to take responsibility for ready()ing this G.

const _Gpreempted

_Grunnable const #

_Grunnable means this goroutine is on a run queue. It is not currently executing user code. The stack is not owned.

const _Grunnable

_Grunning const #

_Grunning means this goroutine may execute user code. The stack is owned by this goroutine. It is not on a run queue. It is assigned an M and a P (g.m and g.m.p are valid).

const _Grunning

_Gscan const #

_Gscan combined with one of the above states other than _Grunning indicates that GC is scanning the stack. The goroutine is not executing user code and the stack is owned by the goroutine that set the _Gscan bit. _Gscanrunning is different: it is used to briefly block state transitions while GC signals the G to scan its own stack. This is otherwise like _Grunning. atomicstatus&~Gscan gives the state the goroutine will return to when the scan completes.

const _Gscan = 0x1000

_Gscanpreempted const #

defined constants

const _Gscanpreempted = _Gscan + _Gpreempted // 0x1009

_Gscanrunnable const #

defined constants

const _Gscanrunnable = _Gscan + _Grunnable // 0x1001

_Gscanrunning const #

defined constants

const _Gscanrunning = _Gscan + _Grunning // 0x1002

_Gscansyscall const #

defined constants

const _Gscansyscall = _Gscan + _Gsyscall // 0x1003

_Gscanwaiting const #

defined constants

const _Gscanwaiting = _Gscan + _Gwaiting // 0x1004

_Gsyscall const #

_Gsyscall means this goroutine is executing a system call. It is not executing user code. The stack is owned by this goroutine. It is not on a run queue. It is assigned an M.

const _Gsyscall

_Gwaiting const #

_Gwaiting means this goroutine is blocked in the runtime. It is not executing user code. It is not on a run queue, but should be recorded somewhere (e.g., a channel wait queue) so it can be ready()d when necessary. The stack is not owned *except* that a channel operation may read or write parts of the stack under the appropriate channel lock. Otherwise, it is not safe to access the stack after a goroutine enters _Gwaiting (e.g., it may get moved).

const _Gwaiting

_HPET_DEV_MAP_MAX const #

const _HPET_DEV_MAP_MAX = 10

_HPET_MAIN_COUNTER const #

const _HPET_MAIN_COUNTER = 0xf0

_HWCAP_VFP const #

const _HWCAP_VFP = 1 << 6

_HWCAP_VFP const #

const _HWCAP_VFP = 1 << 6

_HWCAP_VFPv3 const #

const _HWCAP_VFPv3 = 1 << 13

_HWCAP_VFPv3 const #

const _HWCAP_VFPv3 = 1 << 13

_HWCAP_VX const #

const _HWCAP_VX = 1 << 11 // vector facility (s390x)

_HW_NCPU const #

From DragonFly's &lt;sys/sysctl.h&gt;.

const _HW_NCPU = 3

_HW_NCPU const #

const _HW_NCPU = 3

_HW_NCPU const #

From OpenBSD's &lt;sys/sysctl.h&gt;.

const _HW_NCPU = 3

_HW_NCPU const #

From NetBSD's &lt;sys/sysctl.h&gt;.

const _HW_NCPU = 3

_HW_NCPUONLINE const #

From OpenBSD's &lt;sys/sysctl.h&gt;.

const _HW_NCPUONLINE = 25

_HW_NCPUONLINE const #

From NetBSD's &lt;sys/sysctl.h&gt;.

const _HW_NCPUONLINE = 16

_HW_PAGESIZE const #

From FreeBSD's &lt;sys/sysctl.h&gt;.

const _HW_PAGESIZE = 7

_HW_PAGESIZE const #

From NetBSD's &lt;sys/sysctl.h&gt;.

const _HW_PAGESIZE = 7

_HW_PAGESIZE const #

From OpenBSD's &lt;sys/sysctl.h&gt;.

const _HW_PAGESIZE = 7

_HW_PAGESIZE const #

const _HW_PAGESIZE = 7

_HW_PAGESIZE const #

From DragonFly's &lt;sys/sysctl.h&gt;.

const _HW_PAGESIZE = 7

_INFINITE const #

const _INFINITE = 0xffffffff

_INVALID_HANDLE_VALUE const #

const _INVALID_HANDLE_VALUE = ^uintptr(0)

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = C.ITIMER_PROF

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

Constants

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_PROF const #

const _ITIMER_PROF = 0x2

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = C.ITIMER_REAL

_ITIMER_REAL const #

Constants

const _ITIMER_REAL = 0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_REAL const #

const _ITIMER_REAL = 0x0

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

Constants

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = C.ITIMER_VIRTUAL

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_ITIMER_VIRTUAL const #

const _ITIMER_VIRTUAL = 0x1

_KERN_OSREV const #

From NetBSD's &lt;sys/sysctl.h&gt;.

const _KERN_OSREV = 3

_KindSpecialCleanup const #

_KindSpecialCleanup is for tracking cleanups.

const _KindSpecialCleanup = 6

_KindSpecialFinalizer const #

_KindSpecialFinalizer is for tracking finalizers.

const _KindSpecialFinalizer = 1

_KindSpecialPinCounter const #

_KindSpecialPinCounter is a special used for objects that are pinned multiple times

const _KindSpecialPinCounter = 5

_KindSpecialProfile const #

_KindSpecialProfile is for memory profiling.

const _KindSpecialProfile = 3

_KindSpecialReachable const #

_KindSpecialReachable is a special used for tracking reachability during testing.

const _KindSpecialReachable = 4

_KindSpecialWeakHandle const #

_KindSpecialWeakHandle is used for creating weak pointers.

const _KindSpecialWeakHandle = 2

_LOAD_LIBRARY_SEARCH_SYSTEM32 const #

const _LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800

_LWP_DETACHED const #

From NetBSD's &lt;sys/lwp.h&gt;.

const _LWP_DETACHED = 0x00000040

_LoadLibraryExW var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _LoadLibraryExW stdFunction

_LoadLibraryW var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _LoadLibraryW stdFunction

_MADV_COLLAPSE const #

const _MADV_COLLAPSE = 0x19

_MADV_COLLAPSE const #

const _MADV_COLLAPSE = 0x19

_MADV_COLLAPSE const #

const _MADV_COLLAPSE = 0x19

_MADV_COLLAPSE const #

const _MADV_COLLAPSE = 0x19

_MADV_COLLAPSE const #

const _MADV_COLLAPSE = 0x19

_MADV_COLLAPSE const #

const _MADV_COLLAPSE = 0x19

_MADV_COLLAPSE const #

const _MADV_COLLAPSE = 0x19

_MADV_COLLAPSE const #

Constants

const _MADV_COLLAPSE = 0x19

_MADV_COLLAPSE const #

const _MADV_COLLAPSE = 0x19

_MADV_COLLAPSE const #

const _MADV_COLLAPSE = 0x19

_MADV_COLLAPSE const #

const _MADV_COLLAPSE = 0x19

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

Constants

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = C.MADV_DONTNEED

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_DONTNEED const #

const _MADV_DONTNEED = 0x4

_MADV_FREE const #

const _MADV_FREE = 0x6

_MADV_FREE const #

const _MADV_FREE = 0x5

_MADV_FREE const #

const _MADV_FREE = 0x6

_MADV_FREE const #

Constants

const _MADV_FREE = 0x8

_MADV_FREE const #

const _MADV_FREE = 0x6

_MADV_FREE const #

const _MADV_FREE = 0x6

_MADV_FREE const #

const _MADV_FREE = 0x6

_MADV_FREE const #

const _MADV_FREE = 0x8

_MADV_FREE const #

const _MADV_FREE = 0x5

_MADV_FREE const #

const _MADV_FREE = 0x8

_MADV_FREE const #

const _MADV_FREE = 0x8

_MADV_FREE const #

const _MADV_FREE = 0x5

_MADV_FREE const #

const _MADV_FREE = 0x5

_MADV_FREE const #

const _MADV_FREE = 0x8

_MADV_FREE const #

const _MADV_FREE = 0x6

_MADV_FREE const #

const _MADV_FREE = 0x6

_MADV_FREE const #

const _MADV_FREE = 0x8

_MADV_FREE const #

const _MADV_FREE = 0x6

_MADV_FREE const #

const _MADV_FREE = 0x8

_MADV_FREE const #

const _MADV_FREE = 0x6

_MADV_FREE const #

const _MADV_FREE = 0x5

_MADV_FREE const #

const _MADV_FREE = 0x5

_MADV_FREE const #

const _MADV_FREE = 0x8

_MADV_FREE const #

const _MADV_FREE = 0x8

_MADV_FREE const #

const _MADV_FREE = 0x5

_MADV_FREE const #

const _MADV_FREE = 0x5

_MADV_FREE const #

const _MADV_FREE = 0x8

_MADV_FREE const #

const _MADV_FREE = 0x5

_MADV_FREE const #

const _MADV_FREE = 0x6

_MADV_FREE const #

const _MADV_FREE = 0x6

_MADV_FREE const #

const _MADV_FREE = 0x8

_MADV_FREE_REUSABLE const #

const _MADV_FREE_REUSABLE = 0x7

_MADV_FREE_REUSABLE const #

const _MADV_FREE_REUSABLE = 0x7

_MADV_FREE_REUSE const #

const _MADV_FREE_REUSE = 0x8

_MADV_FREE_REUSE const #

const _MADV_FREE_REUSE = 0x8

_MADV_HUGEPAGE const #

const _MADV_HUGEPAGE = 0xe

_MADV_HUGEPAGE const #

const _MADV_HUGEPAGE = 0xe

_MADV_HUGEPAGE const #

Constants

const _MADV_HUGEPAGE = 0xe

_MADV_HUGEPAGE const #

const _MADV_HUGEPAGE = 0xe

_MADV_HUGEPAGE const #

const _MADV_HUGEPAGE = 0xe

_MADV_HUGEPAGE const #

const _MADV_HUGEPAGE = 0xe

_MADV_HUGEPAGE const #

const _MADV_HUGEPAGE = 0xe

_MADV_HUGEPAGE const #

const _MADV_HUGEPAGE = 0xe

_MADV_HUGEPAGE const #

const _MADV_HUGEPAGE = 0xe

_MADV_HUGEPAGE const #

const _MADV_HUGEPAGE = 0xe

_MADV_HUGEPAGE const #

const _MADV_HUGEPAGE = 0xe

_MADV_NOHUGEPAGE const #

const _MADV_NOHUGEPAGE = 0xf

_MADV_NOHUGEPAGE const #

const _MADV_NOHUGEPAGE = 0xf

_MADV_NOHUGEPAGE const #

const _MADV_NOHUGEPAGE = 0xf

_MADV_NOHUGEPAGE const #

const _MADV_NOHUGEPAGE = 0xf

_MADV_NOHUGEPAGE const #

const _MADV_NOHUGEPAGE = 0xf

_MADV_NOHUGEPAGE const #

const _MADV_NOHUGEPAGE = 0xf

_MADV_NOHUGEPAGE const #

const _MADV_NOHUGEPAGE = 0xf

_MADV_NOHUGEPAGE const #

const _MADV_NOHUGEPAGE = 0xf

_MADV_NOHUGEPAGE const #

const _MADV_NOHUGEPAGE = 0xf

_MADV_NOHUGEPAGE const #

Constants

const _MADV_NOHUGEPAGE = 0xf

_MADV_NOHUGEPAGE const #

const _MADV_NOHUGEPAGE = 0xf

_MAP_ANON const #

const _MAP_ANON = 0x1000

_MAP_ANON const #

const _MAP_ANON = 0x1000

_MAP_ANON const #

const _MAP_ANON = 1

_MAP_ANON const #

const _MAP_ANON = 0x1000

_MAP_ANON const #

const _MAP_ANON = 0x20

_MAP_ANON const #

const _MAP_ANON = 0x100

_MAP_ANON const #

const _MAP_ANON = 0x1000

_MAP_ANON const #

const _MAP_ANON = 0x1000

_MAP_ANON const #

const _MAP_ANON = 0x800

_MAP_ANON const #

const _MAP_ANON = C.MAP_ANONYMOUS

_MAP_ANON const #

const _MAP_ANON = 0x1000

_MAP_ANON const #

const _MAP_ANON = 0x1000

_MAP_ANON const #

const _MAP_ANON = 0x10

_MAP_ANON const #

const _MAP_ANON = 0x1000

_MAP_ANON const #

const _MAP_ANON = 0x1000

_MAP_ANON const #

const _MAP_ANON = 0x20

_MAP_ANON const #

Constants

const _MAP_ANON = 0x20

_MAP_ANON const #

const _MAP_ANON = 0x1000

_MAP_ANON const #

const _MAP_ANON = 0x1000

_MAP_ANON const #

const _MAP_ANON = 0x1000

_MAP_ANON const #

const _MAP_ANON = 0x20

_MAP_ANON const #

const _MAP_ANON = 0x1000

_MAP_ANON const #

const _MAP_ANON = 0x1000

_MAP_ANON const #

const _MAP_ANON = 0x1000

_MAP_ANON const #

const _MAP_ANON = 0x1000

_MAP_ANON const #

const _MAP_ANON = 0x800

_MAP_ANON const #

const _MAP_ANON = 0x1000

_MAP_ANON const #

const _MAP_ANON = 0x1000

_MAP_ANON const #

const _MAP_ANON = 0x20

_MAP_ANON const #

const _MAP_ANON = 0x1000

_MAP_ANON const #

const _MAP_ANON = 0x20

_MAP_ANON const #

const _MAP_ANON = 0x20

_MAP_ANON const #

const _MAP_ANON = 0x20

_MAP_ANON const #

const _MAP_ANON = 0x20

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

Constants

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = C.MAP_FIXED

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x100

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_FIXED const #

const _MAP_FIXED = 0x10

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = C.MAP_PRIVATE

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

Constants

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_PRIVATE const #

const _MAP_PRIVATE = 0x2

_MAP_SHARED const #

const _MAP_SHARED = 0x1

_MAP_SHARED const #

const _MAP_SHARED = 0x1

_MAP_SHARED const #

const _MAP_SHARED = 0x1

_MAP_SHARED const #

const _MAP_SHARED = 0x1

_MAP_SHARED const #

const _MAP_SHARED = 0x1

_MAP_STACK const #

const _MAP_STACK = 0x4000

_MAP_STACK const #

const _MAP_STACK = 0x4000

_MAP_STACK const #

const _MAP_STACK = 0x4000

_MAP_STACK const #

const _MAP_STACK = 0x4000

_MAP_STACK const #

const _MAP_STACK = 0x4000

_MAP_STACK const #

const _MAP_STACK = 0x4000

_MAP_STACK const #

const _MAP_STACK = 0x4000

_MAXHOSTNAMELEN const #

const _MAXHOSTNAMELEN = 0x100

_MAX_PATH const #

const _MAX_PATH = 260

_MEM_COMMIT const #

const _MEM_COMMIT = 0x1000

_MEM_DECOMMIT const #

const _MEM_DECOMMIT = 0x4000

_MEM_RELEASE const #

const _MEM_RELEASE = 0x8000

_MEM_RESERVE const #

const _MEM_RESERVE = 0x2000

_MaxGcproc const #

Max number of threads to run garbage collection. 2, 3, and 4 are all plausible maximums depending on the hardware details of the machine. The garbage collector scales well to 32 cpus.

const _MaxGcproc = 32

_MaxSmallSize const #

const _MaxSmallSize = 32768

_NBBY const #

const _NBBY = 0x8

_NBBY const #

Local consts.

const _NBBY = C.NBBY

_NBBY const #

const _NBBY = 0x8

_NBBY const #

const _NBBY = 0x8

_NBBY const #

const _NBBY = 0x8

_NBBY const #

const _NBBY = 0x8

_NCONT const #

notify

const _NCONT = 0

_NDFLT const #

notify

const _NDFLT = 1

_NOTE_TRIGGER const #

const _NOTE_TRIGGER = 0x1000000

_NOTE_TRIGGER const #

const _NOTE_TRIGGER = 0x1000000

_NOTE_TRIGGER const #

const _NOTE_TRIGGER = 0x1000000

_NOTE_TRIGGER const #

const _NOTE_TRIGGER = 0x1000000

_NOTE_TRIGGER const #

const _NOTE_TRIGGER = 0x1000000

_NOTE_TRIGGER const #

const _NOTE_TRIGGER = 0x1000000

_NOTE_TRIGGER const #

const _NOTE_TRIGGER = 0x1000000

_NOTE_TRIGGER const #

const _NOTE_TRIGGER = 0x1000000

_NOTE_TRIGGER const #

const _NOTE_TRIGGER = 0x1000000

_NOTE_TRIGGER const #

const _NOTE_TRIGGER = 0x1000000

_NOTE_TRIGGER const #

const _NOTE_TRIGGER = 0x1000000

_NOTE_TRIGGER const #

const _NOTE_TRIGGER = 0x1000000

_NSIG const #

const _NSIG = 73

_NSIG const #

const _NSIG = 33

_NSIG const #

const _NSIG = 33

_NSIG const #

TODO(brainman): should not need those

const _NSIG = 65

_NSIG const #

const _NSIG = 65

_NSIG const #

const _NSIG = 33

_NSIG const #

const _NSIG = 14

_NSIG const #

const _NSIG = *ast.BinaryExpr

_NSIG const #

const _NSIG = 129

_NSIG const #

wasm has no signals

const _NSIG = 0

_NSIG const #

const _NSIG = 33

_NSIG const #

const _NSIG = 65

_NSIG const #

const _NSIG = 256

_NSIG const #

const _NSIG = 32

_NtAssociateWaitCompletionPacket var #

var _NtAssociateWaitCompletionPacket stdFunction

_NtCancelWaitCompletionPacket var #

var _NtCancelWaitCompletionPacket stdFunction

_NtCreateWaitCompletionPacket var #

Load ntdll.dll manually during startup, otherwise Mingw links wrong printf function to cgo executable (see issue 12030 for details).

var _NtCreateWaitCompletionPacket stdFunction

_NumSizeClasses const #

const _NumSizeClasses = 68

_NumStackOrders const #

Number of orders that get caching. Order 0 is FixedStack and each successive order is twice as large. We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks will be allocated directly. Since FixedStack is different on different systems, we must vary NumStackOrders to keep the same maximum cached size. OS | FixedStack | NumStackOrders -----------------+------------+--------------- linux/darwin/bsd | 2KB | 4 windows/32 | 4KB | 3 windows/64 | 8KB | 2 plan9 | 4KB | 3

const _NumStackOrders = 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9

_OCEXEC const #

open

const _OCEXEC = 32

_OEXCL const #

open

const _OEXCL = 0x1000

_OEXEC const #

open

const _OEXEC = 3

_ORCLOSE const #

open

const _ORCLOSE = 64

_ORDWR const #

open

const _ORDWR = 2

_OREAD const #

open

const _OREAD = 0

_OTRUNC const #

open

const _OTRUNC = 16

_OWRITE const #

open

const _OWRITE = 1

_O_CLOEXEC const #

const _O_CLOEXEC = 0x20000

_O_CLOEXEC const #

const _O_CLOEXEC = 0x10000

_O_CLOEXEC const #

const _O_CLOEXEC = 0x100000

_O_CLOEXEC const #

const _O_CLOEXEC = 0x10000

_O_CLOEXEC const #

const _O_CLOEXEC = 0x10000

_O_CLOEXEC const #

const _O_CLOEXEC = 0x400000

_O_CLOEXEC const #

const _O_CLOEXEC = 0x10000

_O_CLOEXEC const #

const _O_CLOEXEC = 0x100000

_O_CLOEXEC const #

const _O_CLOEXEC = 0x400000

_O_CLOEXEC const #

const _O_CLOEXEC = 0x100000

_O_CLOEXEC const #

const _O_CLOEXEC = 0x10000

_O_CLOEXEC const #

const _O_CLOEXEC = 0x80000

_O_CLOEXEC const #

const _O_CLOEXEC = 0x80000

_O_CLOEXEC const #

const _O_CLOEXEC = 0x80000

_O_CLOEXEC const #

const _O_CLOEXEC = 0x10000

_O_CLOEXEC const #

const _O_CLOEXEC = 0x80000

_O_CLOEXEC const #

const _O_CLOEXEC = 0x80000

_O_CLOEXEC const #

const _O_CLOEXEC = 0x800000

_O_CLOEXEC const #

const _O_CLOEXEC = 0x80000

_O_CLOEXEC const #

const _O_CLOEXEC = 0x100000

_O_CLOEXEC const #

const _O_CLOEXEC = 0x80000

_O_CLOEXEC const #

const _O_CLOEXEC = 0x80000

_O_CLOEXEC const #

const _O_CLOEXEC = 0x80000

_O_CLOEXEC const #

const _O_CLOEXEC = 0x10000

_O_CLOEXEC const #

Constants

const _O_CLOEXEC = 0x80000

_O_CLOEXEC const #

const _O_CLOEXEC = 0x400000

_O_CLOEXEC const #

const _O_CLOEXEC = 0x100000

_O_CLOEXEC const #

const _O_CLOEXEC = 0x400000

_O_CLOEXEC const #

const _O_CLOEXEC = 0x80000

_O_CREAT const #

const _O_CREAT = 0x200

_O_CREAT const #

const _O_CREAT = 0x200

_O_CREAT const #

const _O_CREAT = 0x200

_O_CREAT const #

const _O_CREAT = 0x40

_O_CREAT const #

const _O_CREAT = 0x200

_O_CREAT const #

const _O_CREAT = 0x200

_O_CREAT const #

const _O_CREAT = 0x200

_O_CREAT const #

const _O_CREAT = 0x100

_O_CREAT const #

const _O_CREAT = 0x200

_O_CREAT const #

const _O_CREAT = 0x200

_O_CREAT const #

const _O_CREAT = 0x200

_O_CREAT const #

const _O_CREAT = 0x200

_O_CREAT const #

const _O_CREAT = 0x40

_O_CREAT const #

const _O_CREAT = 0x40

_O_CREAT const #

Constants

const _O_CREAT = 0x40

_O_CREAT const #

const _O_CREAT = 0x200

_O_CREAT const #

const _O_CREAT = 0x200

_O_CREAT const #

const _O_CREAT = 0x40

_O_CREAT const #

const _O_CREAT = 0x200

_O_CREAT const #

const _O_CREAT = C.O_CREAT

_O_CREAT const #

const _O_CREAT = 0x200

_O_CREAT const #

const _O_CREAT = 0x200

_O_CREAT const #

const _O_CREAT = 0x200

_O_CREAT const #

const _O_CREAT = 0x40

_O_CREAT const #

const _O_CREAT = 0x200

_O_CREAT const #

const _O_CREAT = 0x40

_O_CREAT const #

const _O_CREAT = 0x100

_O_CREAT const #

const _O_CREAT = 0x200

_O_CREAT const #

const _O_CREAT = 0x40

_O_CREAT const #

const _O_CREAT = 0x200

_O_CREAT const #

const _O_CREAT = 0x40

_O_CREAT const #

const _O_CREAT = 0x100

_O_CREAT const #

const _O_CREAT = 0x100

_O_NONBLOCK const #

const _O_NONBLOCK = 0x800

_O_NONBLOCK const #

const _O_NONBLOCK = 0x4

_O_NONBLOCK const #

const _O_NONBLOCK = 0x800

_O_NONBLOCK const #

const _O_NONBLOCK = 0x800

_O_NONBLOCK const #

const _O_NONBLOCK = 0x4

_O_NONBLOCK const #

const _O_NONBLOCK = 0x800

_O_NONBLOCK const #

const _O_NONBLOCK = 0x4

_O_NONBLOCK const #

const _O_NONBLOCK = 0x4

_O_NONBLOCK const #

const _O_NONBLOCK = 0x800

_O_NONBLOCK const #

const _O_NONBLOCK = 0x80

_O_NONBLOCK const #

const _O_NONBLOCK = 0x4

_O_NONBLOCK const #

const _O_NONBLOCK = 0x4

_O_NONBLOCK const #

const _O_NONBLOCK = 0x4

_O_NONBLOCK const #

const _O_NONBLOCK = 0x4

_O_NONBLOCK const #

const _O_NONBLOCK = 0x4

_O_NONBLOCK const #

const _O_NONBLOCK = C.O_NONBLOCK

_O_NONBLOCK const #

const _O_NONBLOCK = 0x80

_O_NONBLOCK const #

const _O_NONBLOCK = 0x800

_O_NONBLOCK const #

const _O_NONBLOCK = 0x4

_O_NONBLOCK const #

const _O_NONBLOCK = 0x4

_O_NONBLOCK const #

const _O_NONBLOCK = 0x800

_O_NONBLOCK const #

const _O_NONBLOCK = 0x4

_O_NONBLOCK const #

const _O_NONBLOCK = 0x800

_O_NONBLOCK const #

const _O_NONBLOCK = 0x4

_O_NONBLOCK const #

const _O_NONBLOCK = 0x4

_O_NONBLOCK const #

const _O_NONBLOCK = 0x4

_O_NONBLOCK const #

Constants

const _O_NONBLOCK = 0x800

_O_NONBLOCK const #

const _O_NONBLOCK = 0x4

_O_NONBLOCK const #

const _O_NONBLOCK = 0x4

_O_NONBLOCK const #

const _O_NONBLOCK = 0x4

_O_NONBLOCK const #

const _O_NONBLOCK = 0x80

_O_NONBLOCK const #

const _O_NONBLOCK = 0x4

_O_NONBLOCK const #

const _O_NONBLOCK = 0x4

_O_RDONLY const #

const _O_RDONLY = 0x0

_O_RDONLY const #

const _O_RDONLY = 0x0

_O_RDONLY const #

const _O_RDONLY = 0x0

_O_RDONLY const #

const _O_RDONLY = 0x0

_O_RDONLY const #

const _O_RDONLY = 0x0

_O_RDONLY const #

Constants

const _O_RDONLY = 0

_O_RDONLY const #

const _O_RDONLY = 0x0

_O_RDONLY const #

const _O_RDONLY = 0x0

_O_RDONLY const #

const _O_RDONLY = 0x0

_O_RDONLY const #

const _O_RDONLY = 0x0

_O_RDONLY const #

const _O_RDONLY = 0x0

_O_RDONLY const #

const _O_RDONLY = C.O_RDONLY

_O_RDONLY const #

const _O_RDONLY = 0x0

_O_TRUNC const #

const _O_TRUNC = 0x200

_O_TRUNC const #

const _O_TRUNC = 0x200

_O_TRUNC const #

const _O_TRUNC = 0x400

_O_TRUNC const #

const _O_TRUNC = 0x400

_O_TRUNC const #

const _O_TRUNC = 0x400

_O_TRUNC const #

const _O_TRUNC = 0x400

_O_TRUNC const #

const _O_TRUNC = 0x400

_O_TRUNC const #

const _O_TRUNC = 0x400

_O_TRUNC const #

const _O_TRUNC = C.O_TRUNC

_O_TRUNC const #

const _O_TRUNC = 0x200

_O_TRUNC const #

const _O_TRUNC = 0x200

_O_TRUNC const #

const _O_TRUNC = 0x200

_O_TRUNC const #

const _O_TRUNC = 0x400

_O_TRUNC const #

const _O_TRUNC = 0x400

_O_TRUNC const #

const _O_TRUNC = 0x400

_O_TRUNC const #

const _O_TRUNC = 0x400

_O_TRUNC const #

const _O_TRUNC = 0x200

_O_TRUNC const #

const _O_TRUNC = 0x400

_O_TRUNC const #

const _O_TRUNC = 0x200

_O_TRUNC const #

const _O_TRUNC = 0x200

_O_TRUNC const #

const _O_TRUNC = 0x200

_O_TRUNC const #

const _O_TRUNC = 0x400

_O_TRUNC const #

const _O_TRUNC = 0x400

_O_TRUNC const #

Constants

const _O_TRUNC = 0x200

_O_TRUNC const #

const _O_TRUNC = 0x400

_O_TRUNC const #

const _O_TRUNC = 0x200

_O_TRUNC const #

const _O_TRUNC = 0x200

_O_TRUNC const #

const _O_TRUNC = 0x400

_O_TRUNC const #

const _O_TRUNC = 0x400

_O_TRUNC const #

const _O_TRUNC = 0x400

_O_TRUNC const #

const _O_TRUNC = 0x400

_O_TRUNC const #

const _O_TRUNC = 0x400

_O_TRUNC const #

const _O_TRUNC = 0x200

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = C.O_WRONLY

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

const _O_WRONLY = 0x1

_O_WRONLY const #

Constants

const _O_WRONLY = 0x1

_PAGESIZE const #

const _PAGESIZE = 0x1000

_PAGESIZE const #

const _PAGESIZE = 0x1000

_PAGESIZE const #

const _PAGESIZE = 0x1000

_PAGE_NOACCESS const #

const _PAGE_NOACCESS = 0x0001

_PAGE_READWRITE const #

const _PAGE_READWRITE = 0x0004

_POLLERR const #

const _POLLERR = 0x4000

_POLLERR const #

const _POLLERR = 0x8

_POLLHUP const #

const _POLLHUP = 0x2000

_POLLHUP const #

const _POLLHUP = 0x10

_POLLIN const #

const _POLLIN = 0x0001

_POLLIN const #

const _POLLIN = 0x1

_POLLOUT const #

const _POLLOUT = 0x4

_POLLOUT const #

const _POLLOUT = 0x0002

_PORT_ALERT_UPDATE const #

const _PORT_ALERT_UPDATE = 0x2

_PORT_SOURCE_ALERT const #

const _PORT_SOURCE_ALERT = 0x5

_PORT_SOURCE_FD const #

const _PORT_SOURCE_FD = 0x4

_PROT_EXEC const #

Constants

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = C.PROT_EXEC

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_EXEC const #

const _PROT_EXEC = 0x4

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

Constants

const _PROT_NONE = 0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = C.PROT_NONE

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_NONE const #

const _PROT_NONE = 0x0

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = C.PROT_READ

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_READ const #

Constants

const _PROT_READ = 0x1

_PROT_READ const #

const _PROT_READ = 0x1

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

Constants

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = C.PROT_WRITE

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PROT_WRITE const #

const _PROT_WRITE = 0x2

_PTHREAD_CREATE_DETACHED const #

const _PTHREAD_CREATE_DETACHED = C.PTHREAD_CREATE_DETACHED

_PTHREAD_CREATE_DETACHED const #

const _PTHREAD_CREATE_DETACHED = 0x1

_PTHREAD_CREATE_DETACHED const #

const _PTHREAD_CREATE_DETACHED = 0x1

_PTHREAD_CREATE_DETACHED const #

const _PTHREAD_CREATE_DETACHED = 0x1

_PTHREAD_CREATE_DETACHED const #

const _PTHREAD_CREATE_DETACHED = 0x1

_PTHREAD_CREATE_DETACHED const #

const _PTHREAD_CREATE_DETACHED = 0x1

_PTHREAD_CREATE_DETACHED const #

const _PTHREAD_CREATE_DETACHED = 0x2

_PTHREAD_CREATE_DETACHED const #

const _PTHREAD_CREATE_DETACHED = 0x1

_PTHREAD_CREATE_DETACHED const #

const _PTHREAD_CREATE_DETACHED = 0x1

_PTHREAD_CREATE_DETACHED const #

const _PTHREAD_CREATE_DETACHED = 0x40

_PTHREAD_CREATE_DETACHED const #

const _PTHREAD_CREATE_DETACHED = 0x2

_PTHREAD_KEYS_MAX const #

const _PTHREAD_KEYS_MAX = 512

_PT_DYNAMIC const #

const _PT_DYNAMIC = 2

_PT_LOAD const #

const _PT_LOAD = 1

_PageMask const #

const _PageMask = _PageSize - 1

_PageShift const #

const _PageShift = 13

_PageSize const #

const _PageSize = 1 << _PageShift

_Pdead const #

_Pdead means a P is no longer used (GOMAXPROCS shrank). We reuse Ps if GOMAXPROCS increases. A dead P is mostly stripped of its resources, though a few things remain (e.g., trace buffers).

const _Pdead

_Pgcstop const #

_Pgcstop means a P is halted for STW and owned by the M that stopped the world. The M that stopped the world continues to use its P, even in _Pgcstop. Transitioning from _Prunning to _Pgcstop causes an M to release its P and park. The P retains its run queue and startTheWorld will restart the scheduler on Ps with non-empty run queues.

const _Pgcstop

_Pidle const #

_Pidle means a P is not being used to run user code or the scheduler. Typically, it's on the idle P list and available to the scheduler, but it may just be transitioning between other states. The P is owned by the idle list or by whatever is transitioning its state. Its run queue is empty.

const _Pidle = iota

_PostQueuedCompletionStatus var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _PostQueuedCompletionStatus stdFunction

_ProcessPrng var #

Use ProcessPrng to generate cryptographically random data.

var _ProcessPrng stdFunction

_Prunning const #

_Prunning means a P is owned by an M and is being used to run user code or the scheduler. Only the M that owns this P is allowed to change the P's status from _Prunning. The M may transition the P to _Pidle (if it has no more work to do), _Psyscall (when entering a syscall), or _Pgcstop (to halt for the GC). The M may also hand ownership of the P off directly to another M (e.g., to schedule a locked G).

const _Prunning

_Psyscall const #

_Psyscall means a P is not running user code. It has affinity to an M in a syscall but is not owned by it and may be stolen by another M. This is similar to _Pidle but uses lightweight transitions and maintains M affinity. Leaving _Psyscall must be done with a CAS, either to steal or retake the P. Note that there's an ABA hazard: even if an M successfully CASes its original P back to _Prunning after a syscall, it must understand the P may have been used by another M in the interim.

const _Psyscall

_QueryPerformanceCounter var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _QueryPerformanceCounter stdFunction

_QueryPerformanceFrequency var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _QueryPerformanceFrequency stdFunction

_RCTL_FIRST const #

const _RCTL_FIRST = 0x0

_RCTL_LOCAL_DENY const #

const _RCTL_LOCAL_DENY = 0x2

_RCTL_LOCAL_MAXIMAL const #

const _RCTL_LOCAL_MAXIMAL = 0x80000000

_RCTL_NEXT const #

const _RCTL_NEXT = 0x1

_REG_CPSR const #

const _REG_CPSR = 0x10

_REG_CS const #

const _REG_CS = 0x12

_REG_CS const #

const _REG_CS = 0x16

_REG_CS const #

const _REG_CS = 0xf

_REG_DS const #

const _REG_DS = 0x19

_REG_DS const #

const _REG_DS = 0x3

_REG_DS const #

const _REG_DS = 0x12

_REG_EAX const #

const _REG_EAX = 0xb

_REG_EBP const #

const _REG_EBP = 0x6

_REG_EBX const #

const _REG_EBX = 0x8

_REG_ECX const #

const _REG_ECX = 0xa

_REG_EDI const #

const _REG_EDI = 0x4

_REG_EDX const #

const _REG_EDX = 0x9

_REG_EFL const #

const _REG_EFL = 0x10

_REG_EIP const #

const _REG_EIP = 0xe

_REG_ELR const #

const _REG_ELR = 32

_REG_ERR const #

const _REG_ERR = 0xd

_REG_ERR const #

const _REG_ERR = 0x14

_REG_ERR const #

const _REG_ERR = 0x10

_REG_ES const #

const _REG_ES = 0x2

_REG_ES const #

const _REG_ES = 0x18

_REG_ES const #

const _REG_ES = 0x11

_REG_ESI const #

const _REG_ESI = 0x5

_REG_ESP const #

const _REG_ESP = 0x7

_REG_FS const #

const _REG_FS = 0x10

_REG_FS const #

const _REG_FS = 0x16

_REG_FS const #

const _REG_FS = 0x1

_REG_GS const #

const _REG_GS = 0x0

_REG_GS const #

const _REG_GS = 0xf

_REG_GS const #

const _REG_GS = 0x17

_REG_R0 const #

const _REG_R0 = 0x0

_REG_R1 const #

const _REG_R1 = 0x1

_REG_R10 const #

const _REG_R10 = 0x5

_REG_R10 const #

const _REG_R10 = 0x6

_REG_R10 const #

const _REG_R10 = 0xa

_REG_R11 const #

const _REG_R11 = 0x4

_REG_R11 const #

const _REG_R11 = 0xb

_REG_R11 const #

const _REG_R11 = 0x7

_REG_R12 const #

const _REG_R12 = 0xc

_REG_R12 const #

const _REG_R12 = 0x8

_REG_R12 const #

const _REG_R12 = 0x3

_REG_R13 const #

const _REG_R13 = 0x9

_REG_R13 const #

const _REG_R13 = 0xd

_REG_R13 const #

const _REG_R13 = 0x2

_REG_R14 const #

const _REG_R14 = 0xe

_REG_R14 const #

const _REG_R14 = 0x1

_REG_R14 const #

const _REG_R14 = 0xa

_REG_R15 const #

const _REG_R15 = 0xf

_REG_R15 const #

const _REG_R15 = 0xb

_REG_R15 const #

const _REG_R15 = 0x0

_REG_R2 const #

const _REG_R2 = 0x2

_REG_R3 const #

const _REG_R3 = 0x3

_REG_R4 const #

const _REG_R4 = 0x4

_REG_R5 const #

const _REG_R5 = 0x5

_REG_R6 const #

const _REG_R6 = 0x6

_REG_R7 const #

const _REG_R7 = 0x7

_REG_R8 const #

const _REG_R8 = 0x4

_REG_R8 const #

const _REG_R8 = 0x8

_REG_R8 const #

const _REG_R8 = 0x7

_REG_R9 const #

const _REG_R9 = 0x9

_REG_R9 const #

const _REG_R9 = 0x6

_REG_R9 const #

const _REG_R9 = 0x5

_REG_RAX const #

const _REG_RAX = 0xe

_REG_RAX const #

const _REG_RAX = 0xe

_REG_RBP const #

const _REG_RBP = 0xc

_REG_RBP const #

const _REG_RBP = 0xa

_REG_RBX const #

const _REG_RBX = 0xd

_REG_RBX const #

const _REG_RBX = 0xb

_REG_RCX const #

const _REG_RCX = 0xd

_REG_RCX const #

const _REG_RCX = 0x3

_REG_RDI const #

const _REG_RDI = 0x0

_REG_RDI const #

const _REG_RDI = 0x8

_REG_RDX const #

const _REG_RDX = 0x2

_REG_RDX const #

const _REG_RDX = 0xc

_REG_RFLAGS const #

const _REG_RFLAGS = 0x13

_REG_RFLAGS const #

const _REG_RFLAGS = 0x17

_REG_RIP const #

const _REG_RIP = 0x11

_REG_RIP const #

const _REG_RIP = 0x15

_REG_RSI const #

const _REG_RSI = 0x1

_REG_RSI const #

const _REG_RSI = 0x9

_REG_RSP const #

const _REG_RSP = 0x14

_REG_RSP const #

const _REG_RSP = 0x18

_REG_SPSR const #

const _REG_SPSR = 33

_REG_SS const #

const _REG_SS = 0x12

_REG_SS const #

const _REG_SS = 0x15

_REG_SS const #

const _REG_SS = 0x19

_REG_TPIDR const #

const _REG_TPIDR = 34

_REG_TRAPNO const #

const _REG_TRAPNO = 0xc

_REG_TRAPNO const #

const _REG_TRAPNO = 0x13

_REG_TRAPNO const #

const _REG_TRAPNO = 0xf

_REG_UESP const #

const _REG_UESP = 0x11

_REG_X0 const #

const _REG_X0 = 0

_REG_X1 const #

const _REG_X1 = 1

_REG_X10 const #

const _REG_X10 = 10

_REG_X11 const #

const _REG_X11 = 11

_REG_X12 const #

const _REG_X12 = 12

_REG_X13 const #

const _REG_X13 = 13

_REG_X14 const #

const _REG_X14 = 14

_REG_X15 const #

const _REG_X15 = 15

_REG_X16 const #

const _REG_X16 = 16

_REG_X17 const #

const _REG_X17 = 17

_REG_X18 const #

const _REG_X18 = 18

_REG_X19 const #

const _REG_X19 = 19

_REG_X2 const #

const _REG_X2 = 2

_REG_X20 const #

const _REG_X20 = 20

_REG_X21 const #

const _REG_X21 = 21

_REG_X22 const #

const _REG_X22 = 22

_REG_X23 const #

const _REG_X23 = 23

_REG_X24 const #

const _REG_X24 = 24

_REG_X25 const #

const _REG_X25 = 25

_REG_X26 const #

const _REG_X26 = 26

_REG_X27 const #

const _REG_X27 = 27

_REG_X28 const #

const _REG_X28 = 28

_REG_X29 const #

const _REG_X29 = 29

_REG_X3 const #

const _REG_X3 = 3

_REG_X30 const #

const _REG_X30 = 30

_REG_X31 const #

const _REG_X31 = 31

_REG_X4 const #

const _REG_X4 = 4

_REG_X5 const #

const _REG_X5 = 5

_REG_X6 const #

const _REG_X6 = 6

_REG_X7 const #

const _REG_X7 = 7

_REG_X8 const #

const _REG_X8 = 8

_REG_X9 const #

const _REG_X9 = 9

_RFCENVG const #

rfork

const _RFCENVG = 1 << 11

_RFCFDG const #

rfork

const _RFCFDG = 1 << 12

_RFCNAMEG const #

rfork

const _RFCNAMEG = 1 << 10

_RFENVG const #

rfork

const _RFENVG = 1 << 1

_RFFDG const #

rfork

const _RFFDG = 1 << 2

_RFMEM const #

rfork

const _RFMEM = 1 << 5

_RFNAMEG const #

rfork

const _RFNAMEG = 1 << 0

_RFNOMNT const #

rfork

const _RFNOMNT = 1 << 14

_RFNOTEG const #

rfork

const _RFNOTEG = 1 << 3

_RFNOWAIT const #

rfork

const _RFNOWAIT = 1 << 6

_RFPROC const #

rfork

const _RFPROC = 1 << 4

_RFREND const #

rfork

const _RFREND = 1 << 13

_RaiseFailFastException var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _RaiseFailFastException stdFunction

_ResumeThread var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _ResumeThread stdFunction

_RtlGetCurrentPeb var #

var _RtlGetCurrentPeb stdFunction

_RtlGetVersion var #

var _RtlGetVersion stdFunction

_RtlLookupFunctionEntry var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _RtlLookupFunctionEntry stdFunction

_RtlVirtualUnwind var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _RtlVirtualUnwind stdFunction

_SA_64REGSET const #

const _SA_64REGSET = 0x200

_SA_64REGSET const #

const _SA_64REGSET = 0x200

_SA_ONSTACK const #

const _SA_ONSTACK = 0x1

_SA_ONSTACK const #

const _SA_ONSTACK = 0x1

_SA_ONSTACK const #

const _SA_ONSTACK = 0x8000000

_SA_ONSTACK const #

const _SA_ONSTACK = 0x8000000

_SA_ONSTACK const #

const _SA_ONSTACK = 0x8000000

_SA_ONSTACK const #

const _SA_ONSTACK = 0x1

_SA_ONSTACK const #

const _SA_ONSTACK = 0x1

_SA_ONSTACK const #

const _SA_ONSTACK = 0x1

_SA_ONSTACK const #

const _SA_ONSTACK = 0x1

_SA_ONSTACK const #

const _SA_ONSTACK = 0x1

_SA_ONSTACK const #

const _SA_ONSTACK = 0x1

_SA_ONSTACK const #

const _SA_ONSTACK = 0x8000000

_SA_ONSTACK const #

const _SA_ONSTACK = 0x8000000

_SA_ONSTACK const #

const _SA_ONSTACK = 0x8000000

_SA_ONSTACK const #

const _SA_ONSTACK = 0x1

_SA_ONSTACK const #

const _SA_ONSTACK = 0x1

_SA_ONSTACK const #

Constants

const _SA_ONSTACK = 0x8000000

_SA_ONSTACK const #

const _SA_ONSTACK = 0x1

_SA_ONSTACK const #

const _SA_ONSTACK = 0x1

_SA_ONSTACK const #

const _SA_ONSTACK = 0x8000000

_SA_ONSTACK const #

const _SA_ONSTACK = 0x1

_SA_ONSTACK const #

const _SA_ONSTACK = 0x1

_SA_ONSTACK const #

const _SA_ONSTACK = 0x1

_SA_ONSTACK const #

const _SA_ONSTACK = C.SA_ONSTACK

_SA_ONSTACK const #

const _SA_ONSTACK = 0x8000000

_SA_ONSTACK const #

const _SA_ONSTACK = 0x1

_SA_ONSTACK const #

const _SA_ONSTACK = 0x8000000

_SA_ONSTACK const #

const _SA_ONSTACK = 0x1

_SA_ONSTACK const #

const _SA_ONSTACK = 0x8000000

_SA_ONSTACK const #

const _SA_ONSTACK = 0x1

_SA_ONSTACK const #

const _SA_ONSTACK = 0x1

_SA_ONSTACK const #

const _SA_ONSTACK = 0x1

_SA_ONSTACK const #

const _SA_ONSTACK = 0x1

_SA_RESTART const #

const _SA_RESTART = 0x2

_SA_RESTART const #

const _SA_RESTART = 0x2

_SA_RESTART const #

const _SA_RESTART = 0x2

_SA_RESTART const #

const _SA_RESTART = 0x10000000

_SA_RESTART const #

const _SA_RESTART = 0x10000000

_SA_RESTART const #

const _SA_RESTART = 0x2

_SA_RESTART const #

const _SA_RESTART = 0x4

_SA_RESTART const #

const _SA_RESTART = 0x2

_SA_RESTART const #

const _SA_RESTART = 0x2

_SA_RESTART const #

const _SA_RESTART = 0x2

_SA_RESTART const #

const _SA_RESTART = 0x2

_SA_RESTART const #

const _SA_RESTART = 0x2

_SA_RESTART const #

const _SA_RESTART = 0x2

_SA_RESTART const #

const _SA_RESTART = 0x2

_SA_RESTART const #

const _SA_RESTART = 0x10000000

_SA_RESTART const #

const _SA_RESTART = 0x10000000

_SA_RESTART const #

const _SA_RESTART = 0x10000000

_SA_RESTART const #

const _SA_RESTART = 0x8

_SA_RESTART const #

const _SA_RESTART = 0x2

_SA_RESTART const #

const _SA_RESTART = 0x2

_SA_RESTART const #

const _SA_RESTART = 0x2

_SA_RESTART const #

const _SA_RESTART = 0x10000000

_SA_RESTART const #

const _SA_RESTART = 0x2

_SA_RESTART const #

const _SA_RESTART = 0x2

_SA_RESTART const #

const _SA_RESTART = C.SA_RESTART

_SA_RESTART const #

const _SA_RESTART = 0x10000000

_SA_RESTART const #

const _SA_RESTART = 0x10000000

_SA_RESTART const #

Constants

const _SA_RESTART = 0x10000000

_SA_RESTART const #

const _SA_RESTART = 0x2

_SA_RESTART const #

const _SA_RESTART = 0x2

_SA_RESTART const #

const _SA_RESTART = 0x10000000

_SA_RESTART const #

const _SA_RESTART = 0x10000000

_SA_RESTART const #

const _SA_RESTART = 0x2

_SA_RESTORER const #

const _SA_RESTORER = 0x0

_SA_RESTORER const #

const _SA_RESTORER = 0

_SA_RESTORER const #

const _SA_RESTORER = 0x0

_SA_RESTORER const #

const _SA_RESTORER = 0

_SA_RESTORER const #

Constants

const _SA_RESTORER = 0

_SA_RESTORER const #

const _SA_RESTORER = 0x4000000

_SA_RESTORER const #

const _SA_RESTORER = 0

_SA_RESTORER const #

const _SA_RESTORER = 0x4000000

_SA_RESTORER const #

const _SA_RESTORER = 0

_SA_RESTORER const #

const _SA_RESTORER = 0

_SA_RESTORER const #

const _SA_RESTORER = 0x0

_SA_SIGINFO const #

const _SA_SIGINFO = 0x4

_SA_SIGINFO const #

const _SA_SIGINFO = 0x40

_SA_SIGINFO const #

const _SA_SIGINFO = 0x8

_SA_SIGINFO const #

const _SA_SIGINFO = 0x4

_SA_SIGINFO const #

const _SA_SIGINFO = 0x4

_SA_SIGINFO const #

const _SA_SIGINFO = 0x40

_SA_SIGINFO const #

const _SA_SIGINFO = 0x4

_SA_SIGINFO const #

const _SA_SIGINFO = 0x40

_SA_SIGINFO const #

const _SA_SIGINFO = 0x8

_SA_SIGINFO const #

const _SA_SIGINFO = 0x100

_SA_SIGINFO const #

const _SA_SIGINFO = 0x40

_SA_SIGINFO const #

const _SA_SIGINFO = 0x40

_SA_SIGINFO const #

const _SA_SIGINFO = 0x40

_SA_SIGINFO const #

const _SA_SIGINFO = 0x4

_SA_SIGINFO const #

const _SA_SIGINFO = 0x40

_SA_SIGINFO const #

const _SA_SIGINFO = 0x4

_SA_SIGINFO const #

const _SA_SIGINFO = 0x40

_SA_SIGINFO const #

const _SA_SIGINFO = C.SA_SIGINFO

_SA_SIGINFO const #

const _SA_SIGINFO = 0x40

_SA_SIGINFO const #

const _SA_SIGINFO = 0x40

_SA_SIGINFO const #

const _SA_SIGINFO = 0x40

_SA_SIGINFO const #

const _SA_SIGINFO = 0x40

_SA_SIGINFO const #

const _SA_SIGINFO = 0x40

_SA_SIGINFO const #

const _SA_SIGINFO = 0x40

_SA_SIGINFO const #

const _SA_SIGINFO = 0x8

_SA_SIGINFO const #

const _SA_SIGINFO = 0x4

_SA_SIGINFO const #

const _SA_SIGINFO = 0x40

_SA_SIGINFO const #

const _SA_SIGINFO = 0x40

_SA_SIGINFO const #

const _SA_SIGINFO = 0x40

_SA_SIGINFO const #

Constants

const _SA_SIGINFO = 0x4

_SA_SIGINFO const #

const _SA_SIGINFO = 0x40

_SA_SIGINFO const #

const _SA_SIGINFO = 0x4

_SA_SIGINFO const #

const _SA_SIGINFO = 0x40

_SA_USERTRAMP const #

const _SA_USERTRAMP = 0x100

_SA_USERTRAMP const #

const _SA_USERTRAMP = 0x100

_SC_NPROCESSORS_ONLN const #

const _SC_NPROCESSORS_ONLN = C._SC_NPROCESSORS_ONLN

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

Constants

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = C.SEGV_ACCERR

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x33

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_ACCERR const #

const _SEGV_ACCERR = 0x2

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

Constants

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x32

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = C.SEGV_MAPERR

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEGV_MAPERR const #

const _SEGV_MAPERR = 0x1

_SEM_FAILCRITICALERRORS const #

const _SEM_FAILCRITICALERRORS = 0x0001

_SEM_NOGPFAULTERRORBOX const #

const _SEM_NOGPFAULTERRORBOX = 0x0002

_SEM_NOOPENFILEERRORBOX const #

const _SEM_NOOPENFILEERRORBOX = 0x8000

_SHN_UNDEF const #

const _SHN_UNDEF = 0

_SHT_DYNSYM const #

const _SHT_DYNSYM = 11

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

Constants

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = C.SIGABRT

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGABRT const #

const _SIGABRT = 0x6

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

Constants

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = C.SIGALRM

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGALRM const #

const _SIGALRM = 0xe

_SIGBUS const #

const _SIGBUS = 0xa

_SIGBUS const #

const _SIGBUS = 0xa

_SIGBUS const #

const _SIGBUS = 0x7

_SIGBUS const #

const _SIGBUS = 0xa

_SIGBUS const #

const _SIGBUS = 0xa

_SIGBUS const #

const _SIGBUS = 0x7

_SIGBUS const #

const _SIGBUS = 0xa

_SIGBUS const #

const _SIGBUS = 0xa

_SIGBUS const #

const _SIGBUS = 0x7

_SIGBUS const #

const _SIGBUS = 0xa

_SIGBUS const #

const _SIGBUS = C.SIGBUS

_SIGBUS const #

const _SIGBUS = 0xa

_SIGBUS const #

const _SIGBUS = 0xa

_SIGBUS const #

const _SIGBUS = 0xa

_SIGBUS const #

const _SIGBUS = 0xa

_SIGBUS const #

const _SIGBUS = 0xa

_SIGBUS const #

const _SIGBUS = 0xa

_SIGBUS const #

const _SIGBUS = 0xa

_SIGBUS const #

const _SIGBUS = 0xa

_SIGBUS const #

const _SIGBUS = 0x7

_SIGBUS const #

const _SIGBUS = 0xa

_SIGBUS const #

const _SIGBUS = 0x7

_SIGBUS const #

const _SIGBUS = 0xa

_SIGBUS const #

const _SIGBUS = 0xa

_SIGBUS const #

const _SIGBUS = 0xa

_SIGBUS const #

const _SIGBUS = 0xa

_SIGBUS const #

const _SIGBUS = 0xa

_SIGBUS const #

const _SIGBUS = 0x7

_SIGBUS const #

const _SIGBUS = 0xa

_SIGBUS const #

const _SIGBUS = 0x7

_SIGBUS const #

const _SIGBUS = 0x7

_SIGBUS const #

const _SIGBUS = 0xa

_SIGBUS const #

Constants

const _SIGBUS = 0x7

_SIGCHLD const #

const _SIGCHLD = 0x14

_SIGCHLD const #

const _SIGCHLD = 0x14

_SIGCHLD const #

const _SIGCHLD = 0x14

_SIGCHLD const #

const _SIGCHLD = 0x11

_SIGCHLD const #

const _SIGCHLD = 0x11

_SIGCHLD const #

const _SIGCHLD = 0x14

_SIGCHLD const #

const _SIGCHLD = 0x14

_SIGCHLD const #

const _SIGCHLD = 0x12

_SIGCHLD const #

const _SIGCHLD = 0x14

_SIGCHLD const #

const _SIGCHLD = 0x14

_SIGCHLD const #

const _SIGCHLD = 0x12

_SIGCHLD const #

const _SIGCHLD = 0x14

_SIGCHLD const #

const _SIGCHLD = 0x14

_SIGCHLD const #

const _SIGCHLD = 0x14

_SIGCHLD const #

const _SIGCHLD = 0x14

_SIGCHLD const #

const _SIGCHLD = 0x11

_SIGCHLD const #

const _SIGCHLD = 0x11

_SIGCHLD const #

const _SIGCHLD = 0x11

_SIGCHLD const #

const _SIGCHLD = 0x11

_SIGCHLD const #

const _SIGCHLD = 0x14

_SIGCHLD const #

const _SIGCHLD = 0x14

_SIGCHLD const #

const _SIGCHLD = 0x11

_SIGCHLD const #

const _SIGCHLD = 0x11

_SIGCHLD const #

const _SIGCHLD = C.SIGCHLD

_SIGCHLD const #

Constants

const _SIGCHLD = 0x11

_SIGCHLD const #

const _SIGCHLD = 0x14

_SIGCHLD const #

const _SIGCHLD = 0x14

_SIGCHLD const #

const _SIGCHLD = 0x14

_SIGCHLD const #

const _SIGCHLD = 0x14

_SIGCHLD const #

const _SIGCHLD = 0x14

_SIGCHLD const #

const _SIGCHLD = 0x12

_SIGCHLD const #

const _SIGCHLD = 0x14

_SIGCHLD const #

const _SIGCHLD = 0x14

_SIGCONT const #

const _SIGCONT = 0x13

_SIGCONT const #

const _SIGCONT = 0x12

_SIGCONT const #

const _SIGCONT = 0x13

_SIGCONT const #

const _SIGCONT = 0x13

_SIGCONT const #

const _SIGCONT = 0x13

_SIGCONT const #

const _SIGCONT = 0x19

_SIGCONT const #

const _SIGCONT = 0x13

_SIGCONT const #

const _SIGCONT = 0x13

_SIGCONT const #

const _SIGCONT = 0x13

_SIGCONT const #

const _SIGCONT = 0x13

_SIGCONT const #

const _SIGCONT = 0x12

_SIGCONT const #

const _SIGCONT = 0x13

_SIGCONT const #

const _SIGCONT = C.SIGCONT

_SIGCONT const #

const _SIGCONT = 0x12

_SIGCONT const #

const _SIGCONT = 0x13

_SIGCONT const #

const _SIGCONT = 0x13

_SIGCONT const #

const _SIGCONT = 0x12

_SIGCONT const #

const _SIGCONT = 0x13

_SIGCONT const #

const _SIGCONT = 0x13

_SIGCONT const #

const _SIGCONT = 0x13

_SIGCONT const #

const _SIGCONT = 0x12

_SIGCONT const #

Constants

const _SIGCONT = 0x12

_SIGCONT const #

const _SIGCONT = 0x19

_SIGCONT const #

const _SIGCONT = 0x13

_SIGCONT const #

const _SIGCONT = 0x12

_SIGCONT const #

const _SIGCONT = 0x19

_SIGCONT const #

const _SIGCONT = 0x13

_SIGCONT const #

const _SIGCONT = 0x13

_SIGCONT const #

const _SIGCONT = 0x12

_SIGCONT const #

const _SIGCONT = 0x13

_SIGCONT const #

const _SIGCONT = 0x12

_SIGCONT const #

const _SIGCONT = 0x13

_SIGCONT const #

const _SIGCONT = 0x13

_SIGEMT const #

const _SIGEMT = 0x7

_SIGEMT const #

const _SIGEMT = 0x7

_SIGEMT const #

const _SIGEMT = 0x7

_SIGEMT const #

const _SIGEMT = 0x7

_SIGEMT const #

const _SIGEMT = 0x7

_SIGEMT const #

const _SIGEMT = 0x7

_SIGEMT const #

const _SIGEMT = 0x7

_SIGEMT const #

const _SIGEMT = 0x7

_SIGEMT const #

const _SIGEMT = 0x7

_SIGEMT const #

const _SIGEMT = 0x7

_SIGEMT const #

const _SIGEMT = 0x7

_SIGEMT const #

const _SIGEMT = 0x7

_SIGEMT const #

const _SIGEMT = 0x7

_SIGEMT const #

const _SIGEMT = 0x7

_SIGEMT const #

const _SIGEMT = C.SIGEMT

_SIGEMT const #

const _SIGEMT = 0x7

_SIGEMT const #

const _SIGEMT = 0x7

_SIGEMT const #

const _SIGEMT = 0x7

_SIGEMT const #

const _SIGEMT = 0x7

_SIGEMT const #

const _SIGEMT = 0x7

_SIGEMT const #

const _SIGEMT = 0x7

_SIGEMT const #

const _SIGEMT = 0x7

_SIGEMT const #

const _SIGEMT = 0x7

_SIGEMT const #

const _SIGEMT = 0x7

_SIGEV_THREAD_ID const #

const _SIGEV_THREAD_ID = 0x4

_SIGEV_THREAD_ID const #

const _SIGEV_THREAD_ID = 0x4

_SIGEV_THREAD_ID const #

const _SIGEV_THREAD_ID = 0x4

_SIGEV_THREAD_ID const #

Constants

const _SIGEV_THREAD_ID = 0x4

_SIGEV_THREAD_ID const #

const _SIGEV_THREAD_ID = 0x4

_SIGEV_THREAD_ID const #

const _SIGEV_THREAD_ID = 0x4

_SIGEV_THREAD_ID const #

const _SIGEV_THREAD_ID = 0x4

_SIGEV_THREAD_ID const #

const _SIGEV_THREAD_ID = 0x4

_SIGEV_THREAD_ID const #

const _SIGEV_THREAD_ID = 0x4

_SIGEV_THREAD_ID const #

const _SIGEV_THREAD_ID = 0x4

_SIGEV_THREAD_ID const #

const _SIGEV_THREAD_ID = 0x4

_SIGFLOAT const #

const _SIGFLOAT = 5

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = C.SIGFPE

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

Constants

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGFPE const #

const _SIGFPE = 0x8

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

Constants

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = C.SIGHUP

_SIGHUP const #

const _SIGHUP = 0x1

_SIGHUP const #

const _SIGHUP = 0x1

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = C.SIGILL

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

Constants

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGILL const #

const _SIGILL = 0x4

_SIGINFO const #

const _SIGINFO = 0x1d

_SIGINFO const #

const _SIGINFO = 0x1d

_SIGINFO const #

const _SIGINFO = 0x1d

_SIGINFO const #

const _SIGINFO = 0x1d

_SIGINFO const #

const _SIGINFO = 0x1d

_SIGINFO const #

const _SIGINFO = 0x1d

_SIGINFO const #

const _SIGINFO = 0x1d

_SIGINFO const #

const _SIGINFO = 0x1d

_SIGINFO const #

const _SIGINFO = 0x1d

_SIGINFO const #

const _SIGINFO = 0x1d

_SIGINFO const #

const _SIGINFO = 0x1d

_SIGINFO const #

const _SIGINFO = 0x1d

_SIGINFO const #

const _SIGINFO = 0x1d

_SIGINFO const #

const _SIGINFO = 0x1d

_SIGINFO const #

const _SIGINFO = 0x1d

_SIGINFO const #

const _SIGINFO = 0x1d

_SIGINFO const #

const _SIGINFO = 0x1d

_SIGINFO const #

const _SIGINFO = 0x1d

_SIGINFO const #

const _SIGINFO = 0x1d

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

Constants

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = C.SIGINT

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINT const #

const _SIGINT = 0x2

_SIGINTDIV const #

const _SIGINTDIV = 4

_SIGIO const #

const _SIGIO = 0x17

_SIGIO const #

const _SIGIO = 0x17

_SIGIO const #

const _SIGIO = 0x17

_SIGIO const #

const _SIGIO = 0x17

_SIGIO const #

const _SIGIO = 0x17

_SIGIO const #

const _SIGIO = 0x16

_SIGIO const #

const _SIGIO = 0x17

_SIGIO const #

const _SIGIO = 0x17

_SIGIO const #

const _SIGIO = 0x1d

_SIGIO const #

const _SIGIO = 0x17

_SIGIO const #

const _SIGIO = 0x17

_SIGIO const #

const _SIGIO = 0x1d

_SIGIO const #

const _SIGIO = 0x17

_SIGIO const #

const _SIGIO = 0x17

_SIGIO const #

const _SIGIO = 0x16

_SIGIO const #

const _SIGIO = 0x17

_SIGIO const #

const _SIGIO = 0x17

_SIGIO const #

const _SIGIO = 0x16

_SIGIO const #

const _SIGIO = 0x1d

_SIGIO const #

const _SIGIO = 0x1d

_SIGIO const #

const _SIGIO = 0x1d

_SIGIO const #

const _SIGIO = 0x1d

_SIGIO const #

const _SIGIO = C.SIGIO

_SIGIO const #

const _SIGIO = 0x17

_SIGIO const #

const _SIGIO = 0x17

_SIGIO const #

const _SIGIO = 0x17

_SIGIO const #

const _SIGIO = 0x17

_SIGIO const #

const _SIGIO = 0x17

_SIGIO const #

const _SIGIO = 0x1d

_SIGIO const #

const _SIGIO = 0x17

_SIGIO const #

const _SIGIO = 0x17

_SIGIO const #

Constants

const _SIGIO = 0x1d

_SIGIO const #

const _SIGIO = 0x1d

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = C.SIGKILL

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

Constants

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGKILL const #

const _SIGKILL = 0x9

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = C.SIGPIPE

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

const _SIGPIPE = 0xd

_SIGPIPE const #

Constants

const _SIGPIPE = 0xd

_SIGPROF const #

const _SIGPROF = 0x1b

_SIGPROF const #

const _SIGPROF = 0x1b

_SIGPROF const #

const _SIGPROF = 0x1b

_SIGPROF const #

const _SIGPROF = 0x1b

_SIGPROF const #

const _SIGPROF = 0x1d

_SIGPROF const #

const _SIGPROF = 0x1b

_SIGPROF const #

const _SIGPROF = 0x1b

_SIGPROF const #

const _SIGPROF = 0x1b

_SIGPROF const #

const _SIGPROF = C.SIGPROF

_SIGPROF const #

const _SIGPROF = 0x1b

_SIGPROF const #

const _SIGPROF = 0x1b

_SIGPROF const #

const _SIGPROF = 0x1b

_SIGPROF const #

const _SIGPROF = 0x1b

_SIGPROF const #

const _SIGPROF = 0x1b

_SIGPROF const #

const _SIGPROF = 0x1b

_SIGPROF const #

Constants

const _SIGPROF = 0x1b

_SIGPROF const #

const _SIGPROF = 0x1b

_SIGPROF const #

const _SIGPROF = 0x1b

_SIGPROF const #

const _SIGPROF = 0x1b

_SIGPROF const #

const _SIGPROF = 0x1b

_SIGPROF const #

const _SIGPROF = 0x1b

_SIGPROF const #

const _SIGPROF = 0x1b

_SIGPROF const #

const _SIGPROF = 0x1b

_SIGPROF const #

const _SIGPROF = 0x1b

_SIGPROF const #

const _SIGPROF = 0x1b

_SIGPROF const #

const _SIGPROF = 0x1b

_SIGPROF const #

const _SIGPROF = 0x1b

_SIGPROF const #

const _SIGPROF = 0x1b

_SIGPROF const #

const _SIGPROF = 0x1d

_SIGPROF const #

const _SIGPROF = 0

_SIGPROF const #

const _SIGPROF = 0x1b

_SIGPROF const #

const _SIGPROF = 0x20

_SIGPROF const #

const _SIGPROF = 0x1d

_SIGPROF const #

const _SIGPROF = 0x1b

_SIGPWR const #

const _SIGPWR = 0x1e

_SIGPWR const #

const _SIGPWR = 0x1e

_SIGPWR const #

const _SIGPWR = 0x1e

_SIGPWR const #

const _SIGPWR = 0x1e

_SIGPWR const #

const _SIGPWR = C.SIGPWR

_SIGPWR const #

const _SIGPWR = 0x1e

_SIGPWR const #

const _SIGPWR = 0x1d

_SIGPWR const #

const _SIGPWR = 0x1e

_SIGPWR const #

Constants

const _SIGPWR = 0x1e

_SIGPWR const #

const _SIGPWR = 0x1e

_SIGPWR const #

const _SIGPWR = 0x1e

_SIGPWR const #

const _SIGPWR = 0x13

_SIGPWR const #

const _SIGPWR = 0x13

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = C.SIGQUIT

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

Constants

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGQUIT const #

const _SIGQUIT = 0x3

_SIGRFAULT const #

Notes in runtime·sigtab that are handled by runtime·sigpanic.

const _SIGRFAULT = 2

_SIGRTMIN const #

const _SIGRTMIN = 0x20

_SIGRTMIN const #

const _SIGRTMIN = 0x20

_SIGRTMIN const #

const _SIGRTMIN = 0x20

_SIGRTMIN const #

const _SIGRTMIN = 0x20

_SIGRTMIN const #

const _SIGRTMIN = 0x20

_SIGRTMIN const #

const _SIGRTMIN = 0x20

_SIGRTMIN const #

const _SIGRTMIN = 0x20

_SIGRTMIN const #

const _SIGRTMIN = 0x20

_SIGRTMIN const #

const _SIGRTMIN = 0x20

_SIGRTMIN const #

const _SIGRTMIN = 0x20

_SIGRTMIN const #

Constants

const _SIGRTMIN = 0x20

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

Constants

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = C.SIGSEGV

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSEGV const #

const _SIGSEGV = 0xb

_SIGSTKFLT const #

const _SIGSTKFLT = 0x10

_SIGSTKFLT const #

const _SIGSTKFLT = 0x10

_SIGSTKFLT const #

const _SIGSTKFLT = 0x10

_SIGSTKFLT const #

const _SIGSTKFLT = 0x10

_SIGSTKFLT const #

const _SIGSTKFLT = 0x10

_SIGSTKFLT const #

const _SIGSTKFLT = 0x10

_SIGSTKFLT const #

Constants

const _SIGSTKFLT = 0x10

_SIGSTKFLT const #

const _SIGSTKFLT = 0x10

_SIGSTKFLT const #

const _SIGSTKFLT = 0x10

_SIGSTOP const #

const _SIGSTOP = 0x11

_SIGSTOP const #

const _SIGSTOP = 0x11

_SIGSTOP const #

const _SIGSTOP = 0x13

_SIGSTOP const #

const _SIGSTOP = 0x11

_SIGSTOP const #

const _SIGSTOP = C.SIGSTOP

_SIGSTOP const #

const _SIGSTOP = 0x11

_SIGSTOP const #

const _SIGSTOP = 0x11

_SIGSTOP const #

const _SIGSTOP = 0x11

_SIGSTOP const #

const _SIGSTOP = 0x11

_SIGSTOP const #

const _SIGSTOP = 0x11

_SIGSTOP const #

const _SIGSTOP = 0x17

_SIGSTOP const #

const _SIGSTOP = 0x11

_SIGSTOP const #

const _SIGSTOP = 0x13

_SIGSTOP const #

const _SIGSTOP = 0x11

_SIGSTOP const #

const _SIGSTOP = 0x11

_SIGSTOP const #

const _SIGSTOP = 0x13

_SIGSTOP const #

const _SIGSTOP = 0x17

_SIGSTOP const #

const _SIGSTOP = 0x11

_SIGSTOP const #

const _SIGSTOP = 0x11

_SIGSTOP const #

const _SIGSTOP = 0x13

_SIGSTOP const #

const _SIGSTOP = 0x11

_SIGSTOP const #

const _SIGSTOP = 0x11

_SIGSTOP const #

const _SIGSTOP = 0x17

_SIGSTOP const #

const _SIGSTOP = 0x13

_SIGSTOP const #

const _SIGSTOP = 0x13

_SIGSTOP const #

const _SIGSTOP = 0x11

_SIGSTOP const #

const _SIGSTOP = 0x11

_SIGSTOP const #

const _SIGSTOP = 0x11

_SIGSTOP const #

const _SIGSTOP = 0x13

_SIGSTOP const #

Constants

const _SIGSTOP = 0x13

_SIGSTOP const #

const _SIGSTOP = 0x11

_SIGSTOP const #

const _SIGSTOP = 0x11

_SIGSTOP const #

const _SIGSTOP = 0x13

_SIGSYS const #

const _SIGSYS = 0xc

_SIGSYS const #

const _SIGSYS = C.SIGSYS

_SIGSYS const #

const _SIGSYS = 0xc

_SIGSYS const #

const _SIGSYS = 0xc

_SIGSYS const #

const _SIGSYS = 0x1f

_SIGSYS const #

Constants

const _SIGSYS = 0x1f

_SIGSYS const #

const _SIGSYS = 0xc

_SIGSYS const #

const _SIGSYS = 0x1f

_SIGSYS const #

const _SIGSYS = 0xc

_SIGSYS const #

const _SIGSYS = 0x1f

_SIGSYS const #

const _SIGSYS = 0xc

_SIGSYS const #

const _SIGSYS = 0xc

_SIGSYS const #

const _SIGSYS = 0xc

_SIGSYS const #

const _SIGSYS = 0xc

_SIGSYS const #

const _SIGSYS = 0xc

_SIGSYS const #

const _SIGSYS = 0x1f

_SIGSYS const #

const _SIGSYS = 0xc

_SIGSYS const #

const _SIGSYS = 0xc

_SIGSYS const #

const _SIGSYS = 0x1f

_SIGSYS const #

const _SIGSYS = 0xc

_SIGSYS const #

const _SIGSYS = 0x1f

_SIGSYS const #

const _SIGSYS = 0xc

_SIGSYS const #

const _SIGSYS = 0x1f

_SIGSYS const #

const _SIGSYS = 0x1f

_SIGSYS const #

const _SIGSYS = 0xc

_SIGSYS const #

const _SIGSYS = 0xc

_SIGSYS const #

const _SIGSYS = 0xc

_SIGSYS const #

const _SIGSYS = 0xc

_SIGSYS const #

const _SIGSYS = 0xc

_SIGSYS const #

const _SIGSYS = 0xc

_SIGSYS const #

const _SIGSYS = 0xc

_SIGSYS const #

const _SIGSYS = 0xc

_SIGSYS const #

const _SIGSYS = 0xc

_SIGTERM const #

const _SIGTERM = C.SIGTERM

_SIGTERM const #

const _SIGTERM = 0xF

_SIGTERM const #

const _SIGTERM = 0xf

_SIGTERM const #

const _SIGTERM = 0xf

_SIGTERM const #

const _SIGTERM = 0xf

_SIGTERM const #

const _SIGTERM = 0xf

_SIGTERM const #

const _SIGTERM = 0xf

_SIGTERM const #

const _SIGTERM = 0xf

_SIGTERM const #

const _SIGTERM = 0xf

_SIGTERM const #

const _SIGTERM = 0xf

_SIGTERM const #

const _SIGTERM = 0xf

_SIGTERM const #

const _SIGTERM = 0xf

_SIGTERM const #

const _SIGTERM = 0xf

_SIGTERM const #

const _SIGTERM = 0xf

_SIGTERM const #

const _SIGTERM = 0xf

_SIGTERM const #

const _SIGTERM = 0xf

_SIGTERM const #

const _SIGTERM = 0xf

_SIGTERM const #

const _SIGTERM = 0xf

_SIGTERM const #

const _SIGTERM = 0xf

_SIGTERM const #

const _SIGTERM = 0xf

_SIGTERM const #

const _SIGTERM = 0xf

_SIGTERM const #

const _SIGTERM = 0xf

_SIGTERM const #

const _SIGTERM = 0xf

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = C.SIGTRAP

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 6

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

Constants

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTRAP const #

const _SIGTRAP = 0x5

_SIGTSTP const #

const _SIGTSTP = 0x12

_SIGTSTP const #

const _SIGTSTP = 0x14

_SIGTSTP const #

const _SIGTSTP = 0x12

_SIGTSTP const #

const _SIGTSTP = 0x12

_SIGTSTP const #

const _SIGTSTP = 0x12

_SIGTSTP const #

Constants

const _SIGTSTP = 0x14

_SIGTSTP const #

const _SIGTSTP = 0x18

_SIGTSTP const #

const _SIGTSTP = 0x12

_SIGTSTP const #

const _SIGTSTP = 0x12

_SIGTSTP const #

const _SIGTSTP = 0x12

_SIGTSTP const #

const _SIGTSTP = 0x14

_SIGTSTP const #

const _SIGTSTP = 0x12

_SIGTSTP const #

const _SIGTSTP = 0x12

_SIGTSTP const #

const _SIGTSTP = 0x12

_SIGTSTP const #

const _SIGTSTP = 0x18

_SIGTSTP const #

const _SIGTSTP = 0x12

_SIGTSTP const #

const _SIGTSTP = 0x12

_SIGTSTP const #

const _SIGTSTP = 0x12

_SIGTSTP const #

const _SIGTSTP = 0x14

_SIGTSTP const #

const _SIGTSTP = 0x14

_SIGTSTP const #

const _SIGTSTP = 0x12

_SIGTSTP const #

const _SIGTSTP = 0x12

_SIGTSTP const #

const _SIGTSTP = 0x12

_SIGTSTP const #

const _SIGTSTP = 0x18

_SIGTSTP const #

const _SIGTSTP = 0x14

_SIGTSTP const #

const _SIGTSTP = 0x12

_SIGTSTP const #

const _SIGTSTP = 0x12

_SIGTSTP const #

const _SIGTSTP = 0x14

_SIGTSTP const #

const _SIGTSTP = C.SIGTSTP

_SIGTSTP const #

const _SIGTSTP = 0x14

_SIGTSTP const #

const _SIGTSTP = 0x12

_SIGTSTP const #

const _SIGTSTP = 0x14

_SIGTSTP const #

const _SIGTSTP = 0x12

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTIN const #

Constants

const _SIGTTIN = 0x15

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTIN const #

const _SIGTTIN = 0x1a

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTIN const #

const _SIGTTIN = 0x1a

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTIN const #

const _SIGTTIN = C.SIGTTIN

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTIN const #

const _SIGTTIN = 0x1a

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTIN const #

const _SIGTTIN = 0x15

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGTTOU const #

Constants

const _SIGTTOU = 0x16

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGTTOU const #

const _SIGTTOU = 0x1b

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGTTOU const #

const _SIGTTOU = 0x1b

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGTTOU const #

const _SIGTTOU = 0x1b

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGTTOU const #

const _SIGTTOU = C.SIGTTOU

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGTTOU const #

const _SIGTTOU = 0x16

_SIGURG const #

const _SIGURG = 0x17

_SIGURG const #

const _SIGURG = 0x17

_SIGURG const #

const _SIGURG = 0x10

_SIGURG const #

const _SIGURG = 0x10

_SIGURG const #

const _SIGURG = C.SIGURG

_SIGURG const #

const _SIGURG = 0x15

_SIGURG const #

const _SIGURG = 0x17

_SIGURG const #

const _SIGURG = 0x10

_SIGURG const #

const _SIGURG = 0x10

_SIGURG const #

const _SIGURG = 0x10

_SIGURG const #

const _SIGURG = 0x10

_SIGURG const #

const _SIGURG = 0x10

_SIGURG const #

const _SIGURG = 0x10

_SIGURG const #

const _SIGURG = 0x17

_SIGURG const #

const _SIGURG = 0x17

_SIGURG const #

const _SIGURG = 0x17

_SIGURG const #

const _SIGURG = 0x10

_SIGURG const #

const _SIGURG = 0x10

_SIGURG const #

const _SIGURG = 0x10

_SIGURG const #

const _SIGURG = 0x10

_SIGURG const #

const _SIGURG = 0x10

_SIGURG const #

const _SIGURG = 0x15

_SIGURG const #

const _SIGURG = 0x10

_SIGURG const #

Constants

const _SIGURG = 0x17

_SIGURG const #

const _SIGURG = 0x10

_SIGURG const #

const _SIGURG = 0x10

_SIGURG const #

const _SIGURG = 0x17

_SIGURG const #

const _SIGURG = 0x10

_SIGURG const #

const _SIGURG = 0x15

_SIGURG const #

const _SIGURG = 0x17

_SIGURG const #

const _SIGURG = 0x10

_SIGURG const #

const _SIGURG = 0x10

_SIGURG const #

const _SIGURG = 0x10

_SIGUSR1 const #

const _SIGUSR1 = 0x1e

_SIGUSR1 const #

const _SIGUSR1 = 0x1e

_SIGUSR1 const #

const _SIGUSR1 = 0x1e

_SIGUSR1 const #

const _SIGUSR1 = 0xa

_SIGUSR1 const #

const _SIGUSR1 = 0xa

_SIGUSR1 const #

Constants

const _SIGUSR1 = 0xa

_SIGUSR1 const #

const _SIGUSR1 = 0x1e

_SIGUSR1 const #

const _SIGUSR1 = 0x10

_SIGUSR1 const #

const _SIGUSR1 = 0xa

_SIGUSR1 const #

const _SIGUSR1 = 0x1e

_SIGUSR1 const #

const _SIGUSR1 = 0x1e

_SIGUSR1 const #

const _SIGUSR1 = 0x1e

_SIGUSR1 const #

const _SIGUSR1 = 0x10

_SIGUSR1 const #

const _SIGUSR1 = 0x1e

_SIGUSR1 const #

const _SIGUSR1 = 0x1e

_SIGUSR1 const #

const _SIGUSR1 = 0x1e

_SIGUSR1 const #

const _SIGUSR1 = 0x1e

_SIGUSR1 const #

const _SIGUSR1 = 0x1e

_SIGUSR1 const #

const _SIGUSR1 = 0xa

_SIGUSR1 const #

const _SIGUSR1 = 0x1e

_SIGUSR1 const #

const _SIGUSR1 = 0xa

_SIGUSR1 const #

const _SIGUSR1 = 0x1e

_SIGUSR1 const #

const _SIGUSR1 = 0x1e

_SIGUSR1 const #

const _SIGUSR1 = 0x1e

_SIGUSR1 const #

const _SIGUSR1 = 0x1e

_SIGUSR1 const #

const _SIGUSR1 = 0x1e

_SIGUSR1 const #

const _SIGUSR1 = 0x10

_SIGUSR1 const #

const _SIGUSR1 = 0xa

_SIGUSR1 const #

const _SIGUSR1 = 0x1e

_SIGUSR1 const #

const _SIGUSR1 = 0xa

_SIGUSR1 const #

const _SIGUSR1 = C.SIGUSR1

_SIGUSR1 const #

const _SIGUSR1 = 0x1e

_SIGUSR1 const #

const _SIGUSR1 = 0xa

_SIGUSR2 const #

const _SIGUSR2 = 0x1f

_SIGUSR2 const #

const _SIGUSR2 = 0x1f

_SIGUSR2 const #

const _SIGUSR2 = 0x1f

_SIGUSR2 const #

const _SIGUSR2 = 0xc

_SIGUSR2 const #

const _SIGUSR2 = 0x1f

_SIGUSR2 const #

const _SIGUSR2 = 0xc

_SIGUSR2 const #

const _SIGUSR2 = 0x1f

_SIGUSR2 const #

const _SIGUSR2 = 0x1f

_SIGUSR2 const #

const _SIGUSR2 = 0x1f

_SIGUSR2 const #

const _SIGUSR2 = 0xc

_SIGUSR2 const #

const _SIGUSR2 = 0x11

_SIGUSR2 const #

const _SIGUSR2 = 0x1f

_SIGUSR2 const #

const _SIGUSR2 = 0x1f

_SIGUSR2 const #

const _SIGUSR2 = 0xc

_SIGUSR2 const #

const _SIGUSR2 = 0x11

_SIGUSR2 const #

Constants

const _SIGUSR2 = 0xc

_SIGUSR2 const #

const _SIGUSR2 = 0x1f

_SIGUSR2 const #

const _SIGUSR2 = 0xc

_SIGUSR2 const #

const _SIGUSR2 = 0x1f

_SIGUSR2 const #

const _SIGUSR2 = C.SIGUSR2

_SIGUSR2 const #

const _SIGUSR2 = 0x1f

_SIGUSR2 const #

const _SIGUSR2 = 0xc

_SIGUSR2 const #

const _SIGUSR2 = 0x1f

_SIGUSR2 const #

const _SIGUSR2 = 0x1f

_SIGUSR2 const #

const _SIGUSR2 = 0xc

_SIGUSR2 const #

const _SIGUSR2 = 0x1f

_SIGUSR2 const #

const _SIGUSR2 = 0x1f

_SIGUSR2 const #

const _SIGUSR2 = 0xc

_SIGUSR2 const #

const _SIGUSR2 = 0x1f

_SIGUSR2 const #

const _SIGUSR2 = 0x11

_SIGUSR2 const #

const _SIGUSR2 = 0x1f

_SIGUSR2 const #

const _SIGUSR2 = 0x1f

_SIGUSR2 const #

const _SIGUSR2 = 0x1f

_SIGVTALRM const #

const _SIGVTALRM = 0x1a

_SIGVTALRM const #

const _SIGVTALRM = 0x1a

_SIGVTALRM const #

const _SIGVTALRM = 0x1c

_SIGVTALRM const #

const _SIGVTALRM = 0x22

_SIGVTALRM const #

const _SIGVTALRM = 0x1a

_SIGVTALRM const #

const _SIGVTALRM = 0x1a

_SIGVTALRM const #

const _SIGVTALRM = 0x1a

_SIGVTALRM const #

const _SIGVTALRM = 0x1a

_SIGVTALRM const #

const _SIGVTALRM = C.SIGVTALRM

_SIGVTALRM const #

const _SIGVTALRM = 0x1a

_SIGVTALRM const #

const _SIGVTALRM = 0x1a

_SIGVTALRM const #

Constants

const _SIGVTALRM = 0x1a

_SIGVTALRM const #

const _SIGVTALRM = 0x1a

_SIGVTALRM const #

const _SIGVTALRM = 0x1a

_SIGVTALRM const #

const _SIGVTALRM = 0x1a

_SIGVTALRM const #

const _SIGVTALRM = 0x1a

_SIGVTALRM const #

const _SIGVTALRM = 0x1a

_SIGVTALRM const #

const _SIGVTALRM = 0x1a

_SIGVTALRM const #

const _SIGVTALRM = 0x1a

_SIGVTALRM const #

const _SIGVTALRM = 0x1a

_SIGVTALRM const #

const _SIGVTALRM = 0x1c

_SIGVTALRM const #

const _SIGVTALRM = 0x1c

_SIGVTALRM const #

const _SIGVTALRM = 0x1a

_SIGVTALRM const #

const _SIGVTALRM = 0x1a

_SIGVTALRM const #

const _SIGVTALRM = 0x1a

_SIGVTALRM const #

const _SIGVTALRM = 0x1a

_SIGVTALRM const #

const _SIGVTALRM = 0x1a

_SIGVTALRM const #

const _SIGVTALRM = 0x1a

_SIGVTALRM const #

const _SIGVTALRM = 0x1a

_SIGVTALRM const #

const _SIGVTALRM = 0x1a

_SIGVTALRM const #

const _SIGVTALRM = 0x1a

_SIGVTALRM const #

const _SIGVTALRM = 0x1a

_SIGVTALRM const #

const _SIGVTALRM = 0x1a

_SIGWAITING const #

const _SIGWAITING = C.SIGWAITING

_SIGWAITING const #

const _SIGWAITING = 0x27

_SIGWFAULT const #

const _SIGWFAULT = 3

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGWINCH const #

const _SIGWINCH = 0x14

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGWINCH const #

const _SIGWINCH = C.SIGWINCH

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGWINCH const #

const _SIGWINCH = 0x14

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGWINCH const #

const _SIGWINCH = 0x14

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGWINCH const #

Constants

const _SIGWINCH = 0x1c

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGWINCH const #

const _SIGWINCH = 0x1c

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXCPU const #

const _SIGXCPU = 0x1e

_SIGXCPU const #

const _SIGXCPU = 0x1e

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXCPU const #

const _SIGXCPU = C.SIGXCPU

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXCPU const #

Constants

const _SIGXCPU = 0x18

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXCPU const #

const _SIGXCPU = 0x1e

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXCPU const #

const _SIGXCPU = 0x18

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = 0x1f

_SIGXFSZ const #

Constants

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = C.SIGXFSZ

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = 0x1f

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = 0x1f

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIGXFSZ const #

const _SIGXFSZ = 0x19

_SIG_BLOCK const #

const _SIG_BLOCK = 1

_SIG_BLOCK const #

const _SIG_BLOCK = C.SIG_BLOCK

_SIG_BLOCK const #

const _SIG_BLOCK = 1

_SIG_BLOCK const #

const _SIG_BLOCK = 0

_SIG_BLOCK const #

const _SIG_BLOCK = 1

_SIG_BLOCK const #

const _SIG_BLOCK = 1

_SIG_BLOCK const #

const _SIG_BLOCK = 0

_SIG_BLOCK const #

const _SIG_BLOCK = 0x0

_SIG_BLOCK const #

const _SIG_BLOCK = 1

_SIG_BLOCK const #

const _SIG_BLOCK = 1

_SIG_BLOCK const #

const _SIG_BLOCK = 1

_SIG_DFL const #

const _SIG_DFL uintptr = 0

_SIG_IGN const #

const _SIG_IGN uintptr = 1

_SIG_SETMASK const #

const _SIG_SETMASK = 3

_SIG_SETMASK const #

const _SIG_SETMASK = 3

_SIG_SETMASK const #

const _SIG_SETMASK = 2

_SIG_SETMASK const #

const _SIG_SETMASK = 3

_SIG_SETMASK const #

const _SIG_SETMASK = C.SIG_SETMASK

_SIG_SETMASK const #

const _SIG_SETMASK = 3

_SIG_SETMASK const #

const _SIG_SETMASK = 2

_SIG_SETMASK const #

const _SIG_SETMASK = 3

_SIG_SETMASK const #

const _SIG_SETMASK = 0x2

_SIG_SETMASK const #

const _SIG_SETMASK = 3

_SIG_SETMASK const #

const _SIG_SETMASK = 3

_SIG_SETMASK const #

const _SIG_SETMASK = 3

_SIG_UNBLOCK const #

const _SIG_UNBLOCK = 2

_SIG_UNBLOCK const #

const _SIG_UNBLOCK = 2

_SIG_UNBLOCK const #

const _SIG_UNBLOCK = 2

_SIG_UNBLOCK const #

const _SIG_UNBLOCK = 2

_SIG_UNBLOCK const #

const _SIG_UNBLOCK = 2

_SIG_UNBLOCK const #

const _SIG_UNBLOCK = C.SIG_UNBLOCK

_SIG_UNBLOCK const #

const _SIG_UNBLOCK = 2

_SIG_UNBLOCK const #

const _SIG_UNBLOCK = 1

_SIG_UNBLOCK const #

const _SIG_UNBLOCK = 0x1

_SIG_UNBLOCK const #

const _SIG_UNBLOCK = 2

_SIG_UNBLOCK const #

const _SIG_UNBLOCK = 1

_SIG_UNBLOCK const #

const _SIG_UNBLOCK = 2

_SI_KERNEL const #

const _SI_KERNEL = 0x80

_SI_KERNEL const #

const _SI_KERNEL = 0x80

_SI_KERNEL const #

Constants

const _SI_KERNEL = 0x80

_SI_KERNEL const #

const _SI_KERNEL = 0x80

_SI_KERNEL const #

const _SI_KERNEL = 0x80

_SI_KERNEL const #

const _SI_KERNEL = 0x80

_SI_KERNEL const #

const _SI_KERNEL = 0x80

_SI_KERNEL const #

const _SI_KERNEL = 0x80

_SI_KERNEL const #

const _SI_KERNEL = 0x80

_SI_KERNEL const #

const _SI_KERNEL = 0x80

_SI_KERNEL const #

const _SI_KERNEL = 0x80

_SI_TIMER const #

const _SI_TIMER = -0x2

_SI_TIMER const #

const _SI_TIMER = -0x2

_SI_TIMER const #

const _SI_TIMER = -0x2

_SI_TIMER const #

const _SI_TIMER = -0x2

_SI_TIMER const #

const _SI_TIMER = -0x2

_SI_TIMER const #

Constants

const _SI_TIMER = -0x2

_SI_TIMER const #

const _SI_TIMER = -0x2

_SI_TIMER const #

const _SI_TIMER = -0x2

_SI_TIMER const #

const _SI_TIMER = -0x2

_SI_TIMER const #

const _SI_TIMER = -0x2

_SI_TIMER const #

const _SI_TIMER = -0x2

_SI_TKILL const #

const _SI_TKILL = -0x6

_SI_USER const #

const _SI_USER = 0

_SI_USER const #

const _SI_USER = 0x0

_SI_USER const #

const _SI_USER = 0

_SI_USER const #

const _SI_USER = 0

_SI_USER const #

const _SI_USER = 0

_SI_USER const #

const _SI_USER = 0

_SI_USER const #

const _SI_USER = 0

_SI_USER const #

const _SI_USER = 0x10001

_SI_USER const #

const _SI_USER = C.SI_USER

_SOCK_DGRAM const #

const _SOCK_DGRAM = 0x2

_SOCK_DGRAM const #

const _SOCK_DGRAM = 0x2

_SOCK_DGRAM const #

const _SOCK_DGRAM = 0x2

_SOCK_DGRAM const #

Constants

const _SOCK_DGRAM = 0x2

_SS_DISABLE const #

const _SS_DISABLE = 4

_SS_DISABLE const #

const _SS_DISABLE = 0x2

_SS_DISABLE const #

const _SS_DISABLE = 2

_SS_DISABLE const #

const _SS_DISABLE = C.SS_DISABLE

_SS_DISABLE const #

const _SS_DISABLE = 2

_SS_DISABLE const #

const _SS_DISABLE = 2

_SS_DISABLE const #

const _SS_DISABLE = 2

_SS_DISABLE const #

const _SS_DISABLE = 4

_SS_DISABLE const #

const _SS_DISABLE = 2

_SS_DISABLE const #

const _SS_DISABLE = 4

_SS_DISABLE const #

const _SS_DISABLE = 4

_SS_DISABLE const #

const _SS_DISABLE = 4

_STB_GLOBAL const #

const _STB_GLOBAL = 1

_STB_WEAK const #

const _STB_WEAK = 2

_STT_FUNC const #

const _STT_FUNC = 2

_STT_NOTYPE const #

const _STT_NOTYPE = 0

_SYS_SECCOMP const #

const _SYS_SECCOMP = 1

_SetConsoleCtrlHandler var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _SetConsoleCtrlHandler stdFunction

_SetErrorMode var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _SetErrorMode stdFunction

_SetEvent var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _SetEvent stdFunction

_SetProcessPriorityBoost var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _SetProcessPriorityBoost stdFunction

_SetThreadContext var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _SetThreadContext stdFunction

_SetThreadPriority var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _SetThreadPriority stdFunction

_SetUnhandledExceptionFilter var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _SetUnhandledExceptionFilter stdFunction

_SetWaitableTimer var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _SetWaitableTimer stdFunction

_SigDefault const #

Values for the flags field of a sigTabT.

const _SigDefault

_SigGoExit const #

Values for the flags field of a sigTabT.

const _SigGoExit

_SigIgn const #

Values for the flags field of a sigTabT.

const _SigIgn

_SigKill const #

Values for the flags field of a sigTabT.

const _SigKill

_SigNotify const #

Values for the flags field of a sigTabT.

const _SigNotify = 1 << iota

_SigPanic const #

Values for the flags field of a sigTabT.

const _SigPanic

_SigSetStack const #

Values for the flags field of a sigTabT.

const _SigSetStack

_SigThrow const #

Values for the flags field of a sigTabT.

const _SigThrow

_SigUnblock const #

Values for the flags field of a sigTabT.

const _SigUnblock

_StackCacheSize const #

Per-P, per order stack segment cache size.

const _StackCacheSize = 32 * 1024

_SuspendThread var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _SuspendThread stdFunction

_SwitchToThread var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _SwitchToThread stdFunction

_THREAD_PRIORITY_HIGHEST const #

const _THREAD_PRIORITY_HIGHEST = 0x2

_TIMER_ABSTIME const #

const _TIMER_ABSTIME = 1

_TIMER_RELTIME const #

const _TIMER_RELTIME = 0

_TinySize const #

Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.

const _TinySize = 16

_TinySizeClass const #

const _TinySizeClass = int8(2)

_TlsAlloc var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _TlsAlloc stdFunction

_UC_CPU const #

const _UC_CPU = 0x04

_UC_SIGMASK const #

From NetBSD's

const _UC_SIGMASK = 0x01

_UMTX_OP_WAIT_UINT const #

const _UMTX_OP_WAIT_UINT = 0xb

_UMTX_OP_WAIT_UINT const #

const _UMTX_OP_WAIT_UINT = 0xb

_UMTX_OP_WAIT_UINT const #

const _UMTX_OP_WAIT_UINT = 0xb

_UMTX_OP_WAIT_UINT const #

const _UMTX_OP_WAIT_UINT = 0xb

_UMTX_OP_WAIT_UINT const #

const _UMTX_OP_WAIT_UINT = 0xb

_UMTX_OP_WAIT_UINT_PRIVATE const #

const _UMTX_OP_WAIT_UINT_PRIVATE = 0xf

_UMTX_OP_WAIT_UINT_PRIVATE const #

const _UMTX_OP_WAIT_UINT_PRIVATE = 0xf

_UMTX_OP_WAIT_UINT_PRIVATE const #

const _UMTX_OP_WAIT_UINT_PRIVATE = 0xf

_UMTX_OP_WAIT_UINT_PRIVATE const #

const _UMTX_OP_WAIT_UINT_PRIVATE = 0xf

_UMTX_OP_WAIT_UINT_PRIVATE const #

const _UMTX_OP_WAIT_UINT_PRIVATE = 0xf

_UMTX_OP_WAKE const #

const _UMTX_OP_WAKE = 0x3

_UMTX_OP_WAKE const #

const _UMTX_OP_WAKE = 0x3

_UMTX_OP_WAKE const #

const _UMTX_OP_WAKE = 0x3

_UMTX_OP_WAKE const #

const _UMTX_OP_WAKE = 0x3

_UMTX_OP_WAKE const #

const _UMTX_OP_WAKE = 0x3

_UMTX_OP_WAKE_PRIVATE const #

const _UMTX_OP_WAKE_PRIVATE = 0x10

_UMTX_OP_WAKE_PRIVATE const #

const _UMTX_OP_WAKE_PRIVATE = 0x10

_UMTX_OP_WAKE_PRIVATE const #

const _UMTX_OP_WAKE_PRIVATE = 0x10

_UMTX_OP_WAKE_PRIVATE const #

const _UMTX_OP_WAKE_PRIVATE = 0x10

_UMTX_OP_WAKE_PRIVATE const #

const _UMTX_OP_WAKE_PRIVATE = 0x10

_VDSO_TH_ALGO_ARM_GENTIM const #

const _VDSO_TH_ALGO_ARM_GENTIM = 1

_VDSO_TH_ALGO_ARM_GENTIM const #

const _VDSO_TH_ALGO_ARM_GENTIM = 1

_VDSO_TH_ALGO_RISCV_RDTIME const #

const _VDSO_TH_ALGO_RISCV_RDTIME = 1

_VDSO_TH_ALGO_X86_HPET const #

const _VDSO_TH_ALGO_X86_HPET = 2

_VDSO_TH_ALGO_X86_TSC const #

const _VDSO_TH_ALGO_X86_TSC = 1

_VDSO_TH_NUM const #

const _VDSO_TH_NUM = 4

_VDSO_TK_VER_CURR const #

const _VDSO_TK_VER_CURR = C.VDSO_TK_VER_CURR

_VDSO_TK_VER_CURR const #

const _VDSO_TK_VER_CURR = 0x1

_VDSO_TK_VER_CURR const #

const _VDSO_TK_VER_CURR = 0x1

_VDSO_TK_VER_CURR const #

const _VDSO_TK_VER_CURR = 0x1

_VDSO_TK_VER_CURR const #

const _VDSO_TK_VER_CURR = 0x1

_VDSO_TK_VER_CURR const #

const _VDSO_TK_VER_CURR = 0x1

_VER_FLG_BASE const #

const _VER_FLG_BASE = 0x1

_VM_REGION_BASIC_INFO_64 const #

const _VM_REGION_BASIC_INFO_64 = 0x9

_VM_REGION_BASIC_INFO_64 const #

const _VM_REGION_BASIC_INFO_64 = 0x9

_VM_REGION_BASIC_INFO_COUNT_64 const #

const _VM_REGION_BASIC_INFO_COUNT_64 = 0x9

_VM_REGION_BASIC_INFO_COUNT_64 const #

const _VM_REGION_BASIC_INFO_COUNT_64 = 0x9

_VirtualAlloc var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _VirtualAlloc stdFunction

_VirtualFree var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _VirtualFree stdFunction

_VirtualQuery var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _VirtualQuery stdFunction

_WAIT_TIMEOUT const #

const _WAIT_TIMEOUT = 0x102

_WER_FAULT_REPORTING_NO_UI const #

const _WER_FAULT_REPORTING_NO_UI = 0x0020

_WaitForMultipleObjects var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _WaitForMultipleObjects stdFunction

_WaitForSingleObject var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _WaitForSingleObject stdFunction

_WerGetFlags var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _WerGetFlags stdFunction

_WerSetFlags var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _WerSetFlags stdFunction

_WorkbufSize const #

const _WorkbufSize = 2048

_WriteConsoleW var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _WriteConsoleW stdFunction

_WriteFile var #

Following syscalls are available on every Windows PC. All these variables are set by the Windows executable loader before the Go program starts.

var _WriteFile stdFunction

__SC_NPROCESSORS_ONLN const #

const __SC_NPROCESSORS_ONLN = 0x48

__SC_NPROCESSORS_ONLN const #

const __SC_NPROCESSORS_ONLN = 0xf

__SC_NPROCESSORS_ONLN const #

const __SC_NPROCESSORS_ONLN = C._SC_NPROCESSORS_ONLN

__SC_PAGESIZE const #

const __SC_PAGESIZE = 0xb

__SC_PAGE_SIZE const #

const __SC_PAGE_SIZE = C._SC_PAGE_SIZE

__SC_PAGE_SIZE const #

const __SC_PAGE_SIZE = 0x30

__sanitizer_cov_8bit_counters_init var #

go:linkname __sanitizer_cov_8bit_counters_init __sanitizer_cov_8bit_counters_init go:cgo_import_static __sanitizer_cov_8bit_counters_init

var __sanitizer_cov_8bit_counters_init byte

__sanitizer_cov_pcs_init var #

go:linkname __sanitizer_cov_pcs_init __sanitizer_cov_pcs_init go:cgo_import_static __sanitizer_cov_pcs_init

var __sanitizer_cov_pcs_init byte

__sanitizer_cov_trace_cmp1 var #

go:linkname __sanitizer_cov_trace_cmp1 __sanitizer_cov_trace_cmp1 go:cgo_import_static __sanitizer_cov_trace_cmp1

var __sanitizer_cov_trace_cmp1 byte

__sanitizer_cov_trace_cmp2 var #

go:linkname __sanitizer_cov_trace_cmp2 __sanitizer_cov_trace_cmp2 go:cgo_import_static __sanitizer_cov_trace_cmp2

var __sanitizer_cov_trace_cmp2 byte

__sanitizer_cov_trace_cmp4 var #

go:linkname __sanitizer_cov_trace_cmp4 __sanitizer_cov_trace_cmp4 go:cgo_import_static __sanitizer_cov_trace_cmp4

var __sanitizer_cov_trace_cmp4 byte

__sanitizer_cov_trace_cmp8 var #

go:linkname __sanitizer_cov_trace_cmp8 __sanitizer_cov_trace_cmp8 go:cgo_import_static __sanitizer_cov_trace_cmp8

var __sanitizer_cov_trace_cmp8 byte

__sanitizer_cov_trace_const_cmp1 var #

go:linkname __sanitizer_cov_trace_const_cmp1 __sanitizer_cov_trace_const_cmp1 go:cgo_import_static __sanitizer_cov_trace_const_cmp1

var __sanitizer_cov_trace_const_cmp1 byte

__sanitizer_cov_trace_const_cmp2 var #

go:linkname __sanitizer_cov_trace_const_cmp2 __sanitizer_cov_trace_const_cmp2 go:cgo_import_static __sanitizer_cov_trace_const_cmp2

var __sanitizer_cov_trace_const_cmp2 byte

__sanitizer_cov_trace_const_cmp4 var #

go:linkname __sanitizer_cov_trace_const_cmp4 __sanitizer_cov_trace_const_cmp4 go:cgo_import_static __sanitizer_cov_trace_const_cmp4

var __sanitizer_cov_trace_const_cmp4 byte

__sanitizer_cov_trace_const_cmp8 var #

go:linkname __sanitizer_cov_trace_const_cmp8 __sanitizer_cov_trace_const_cmp8 go:cgo_import_static __sanitizer_cov_trace_const_cmp8

var __sanitizer_cov_trace_const_cmp8 byte

__sanitizer_weak_hook_strcmp var #

go:linkname __sanitizer_weak_hook_strcmp __sanitizer_weak_hook_strcmp go:cgo_import_static __sanitizer_weak_hook_strcmp

var __sanitizer_weak_hook_strcmp byte

__start___sancov_cntrs var #

start, stop markers of counters, set by the linker

var __start___sancov_cntrs byte

__stop___sancov_cntrs var #

start, stop markers of counters, set by the linker

var __stop___sancov_cntrs byte

__tsan_acquire var #

go:linkname __tsan_acquire __tsan_acquire

var __tsan_acquire byte

__tsan_finalizer_goroutine var #

go:linkname __tsan_finalizer_goroutine __tsan_finalizer_goroutine

var __tsan_finalizer_goroutine byte

__tsan_fini var #

go:linkname __tsan_fini __tsan_fini

var __tsan_fini byte

__tsan_free var #

go:linkname __tsan_free __tsan_free

var __tsan_free byte

__tsan_go_end var #

go:linkname __tsan_go_end __tsan_go_end

var __tsan_go_end byte

__tsan_go_ignore_sync_begin var #

go:linkname __tsan_go_ignore_sync_begin __tsan_go_ignore_sync_begin

var __tsan_go_ignore_sync_begin byte

__tsan_go_ignore_sync_end var #

go:linkname __tsan_go_ignore_sync_end __tsan_go_ignore_sync_end

var __tsan_go_ignore_sync_end byte

__tsan_go_start var #

go:linkname __tsan_go_start __tsan_go_start

var __tsan_go_start byte

__tsan_init var #

Race runtime functions called via runtime·racecall. go:linkname __tsan_init __tsan_init

var __tsan_init byte

__tsan_malloc var #

go:linkname __tsan_malloc __tsan_malloc

var __tsan_malloc byte

__tsan_map_shadow var #

go:linkname __tsan_map_shadow __tsan_map_shadow

var __tsan_map_shadow byte

__tsan_proc_create var #

go:linkname __tsan_proc_create __tsan_proc_create

var __tsan_proc_create byte

__tsan_proc_destroy var #

go:linkname __tsan_proc_destroy __tsan_proc_destroy

var __tsan_proc_destroy byte

__tsan_release var #

go:linkname __tsan_release __tsan_release

var __tsan_release byte

__tsan_release_acquire var #

go:linkname __tsan_release_acquire __tsan_release_acquire

var __tsan_release_acquire byte

__tsan_release_merge var #

go:linkname __tsan_release_merge __tsan_release_merge

var __tsan_release_merge byte

__tsan_report_count var #

go:linkname __tsan_report_count __tsan_report_count

var __tsan_report_count byte

_badsignal var #

var _badsignal = *ast.CallExpr

_cgo_bindm var #

var _cgo_bindm unsafe.Pointer

_cgo_callers var #

var _cgo_callers unsafe.Pointer

_cgo_getstackbound var #

var _cgo_getstackbound unsafe.Pointer

_cgo_init var #

var _cgo_init unsafe.Pointer

_cgo_mmap var #

_cgo_mmap is filled in by runtime/cgo when it is linked into the program, so it is only non-nil when using cgo. go:linkname _cgo_mmap _cgo_mmap

var _cgo_mmap unsafe.Pointer

_cgo_munmap var #

_cgo_munmap is filled in by runtime/cgo when it is linked into the program, so it is only non-nil when using cgo. go:linkname _cgo_munmap _cgo_munmap

var _cgo_munmap unsafe.Pointer

_cgo_notify_runtime_init_done var #

var _cgo_notify_runtime_init_done unsafe.Pointer

_cgo_pthread_key_created var #

var _cgo_pthread_key_created unsafe.Pointer

_cgo_set_context_function var #

var _cgo_set_context_function unsafe.Pointer

_cgo_setenv var #

_cgo_setenv should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/ebitengine/purego Do not remove or change the type signature. See go.dev/issue/67401. go:linkname _cgo_setenv

var _cgo_setenv unsafe.Pointer

_cgo_sigaction var #

_cgo_sigaction is filled in by runtime/cgo when it is linked into the program, so it is only non-nil when using cgo. go:linkname _cgo_sigaction _cgo_sigaction

var _cgo_sigaction unsafe.Pointer

_cgo_sys_thread_create var #

var _cgo_sys_thread_create unsafe.Pointer

_cgo_thread_start var #

var _cgo_thread_start unsafe.Pointer

_cgo_unsetenv var #

_cgo_unsetenv should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/ebitengine/purego Do not remove or change the type signature. See go.dev/issue/67401. go:linkname _cgo_unsetenv

var _cgo_unsetenv unsafe.Pointer

_cgo_yield var #

var _cgo_yield unsafe.Pointer

_si_max_size const #

const _si_max_size = 128

_sigev_max_size const #

const _sigev_max_size = 64

_sunosEAGAIN const #

const _sunosEAGAIN = 11

_sunosMAP_NORESERVE const #

Indicates not to reserve swap space for the mapping.

const _sunosMAP_NORESERVE = 0x40

_timeBeginPeriod var #

These are from non-kernel32.dll, so we prefer to LoadLibraryEx them.

var _timeBeginPeriod stdFunction

_timeEndPeriod var #

These are from non-kernel32.dll, so we prefer to LoadLibraryEx them.

var _timeEndPeriod stdFunction

abiPartBad const #

const abiPartBad abiPartKind = iota

abiPartReg const #

const abiPartReg

abiPartStack const #

const abiPartStack

active_spin const #

This implementation depends on OS-specific implementations of func semacreate(mp *m) Create a semaphore for mp, if it does not already have one. func semasleep(ns int64) int32 If ns < 0, acquire m's semaphore and return 0. If ns >= 0, try to acquire m's semaphore for at most ns nanoseconds. Return 0 if the semaphore was acquired, -1 if interrupted or timed out. func semawakeup(mp *m) Wake up mp, which is or will soon be sleeping on its semaphore.

const active_spin = 4

active_spin const #

const active_spin = 4

active_spin const #

const active_spin = 4

active_spin const #

const active_spin = 4

active_spin const #

const active_spin = 4

active_spin_cnt const #

const active_spin_cnt = 30

active_spin_cnt const #

const active_spin_cnt = 30

active_spin_cnt const #

const active_spin_cnt = 30

active_spin_cnt const #

This implementation depends on OS-specific implementations of func semacreate(mp *m) Create a semaphore for mp, if it does not already have one. func semasleep(ns int64) int32 If ns < 0, acquire m's semaphore and return 0. If ns >= 0, try to acquire m's semaphore for at most ns nanoseconds. Return 0 if the semaphore was acquired, -1 if interrupted or timed out. func semawakeup(mp *m) Wake up mp, which is or will soon be sleeping on its semaphore.

const active_spin_cnt = 30

active_spin_cnt const #

const active_spin_cnt = 30

addrBits const #

addrBits is the number of bits needed to represent a virtual address. See heapAddrBits for a table of address space sizes on various architectures. 48 bits is enough for all architectures except s390x. On AMD64, virtual addresses are 48-bit (or 57-bit) numbers sign extended to 64. We shift the address left 16 to eliminate the sign extended part and make room in the bottom for the count. On s390x, virtual addresses are 64-bit. There's not much we can do about this, so we just hope that the kernel doesn't get to really high addresses and panic if it does.

const addrBits = 48

addrspace_vec var #

var addrspace_vec [1]byte

adjustSignalStack2Indirect var #

var adjustSignalStack2Indirect = adjustSignalStack2

adviseUnused var #

var adviseUnused = uint32(_MADV_FREE)

aeskeysched var #

used in asm_{386,amd64,arm64}.s to seed the hash function

var aeskeysched [hashRandomBytes]byte

agg var #

agg is used by readMetrics, and is protected by metricsSema. Managed as a global variable because its pointer will be an argument to a dynamically-defined function, and we'd like to avoid it escaping to the heap.

var agg statAggregate

aixAddrBits const #

On AIX, 64-bit addresses are split into 36-bit segment number and 28-bit offset in segment. Segment numbers in the range 0x0A0000000-0x0AFFFFFFF(LSA) are available for mmap. We assume all tagged addresses are from memory allocated with mmap. We use one bit to distinguish between the two ranges.

const aixAddrBits = 57

aixStaticDataBase var #

aixStaticDataBase (used only on AIX) holds the unrelocated address of the data section, set by the linker. On AIX, an R_ADDR relocation from an RODATA symbol to a DATA symbol does not work, as the dynamic loader can change the address of the data section, and it is not possible to apply a dynamic relocation to RODATA. In order to get the correct address, we need to apply the delta between unrelocated and relocated data section addresses. aixStaticDataBase is the unrelocated address, and moduledata.data is the relocated one.

var aixStaticDataBase uintptr

aixTagBits const #

const aixTagBits = 64 - aixAddrBits + 3

allDeadlineNotes var #

Linked list of notes with a deadline.

var allDeadlineNotes *note

allDloggers var #

allDloggers is a list of all dloggers, linked through dlogger.allLink. This is accessed atomically. This is prepend only, so it doesn't need to protect against ABA races.

var allDloggers *dloggerImpl

allfin var #

var allfin *finblock

allglen var #

allglen and allgptr are atomic variables that contain len(allgs) and &allgs[0] respectively. Proper ordering depends on totally-ordered loads and stores. Writes are protected by allglock. allgptr is updated before allglen. Readers should read allglen before allgptr to ensure that allglen is always <= len(allgptr). New Gs appended during the race can be missed. For a consistent view of all Gs, allglock must be held. allgptr copies should always be stored as a concrete type or unsafe.Pointer, not uintptr, to ensure that GC can still reach it even if it points to a stale array.

var allglen uintptr

allglock var #

allgs contains all Gs ever created (including dead Gs), and thus never shrinks. Access via the slice is protected by allglock or stop-the-world. Readers that cannot take the lock may (carefully!) use the atomic variables below.

var allglock mutex

allgptr var #

var allgptr **g

allgs var #

var allgs []*g

allm var #

var allm *m

allocmLock var #

allocmLock is locked for read when creating new Ms in allocm and their addition to allm. Thus acquiring this lock for write blocks the creation of new Ms.

var allocmLock rwmutex

allp var #

len(allp) == gomaxprocs; may change at safe points, otherwise immutable.

var allp []*p

allpLock var #

allpLock protects P-less reads and size changes of allp, idlepMask, and timerpMask, and all writes to allp.

var allpLock mutex

arenaBaseOffset const #

arenaBaseOffset is the pointer value that corresponds to index 0 in the heap arena map. On amd64, the address space is 48 bits, sign extended to 64 bits. This offset lets us handle "negative" addresses (or high addresses if viewed as unsigned). On aix/ppc64, this offset allows to keep the heapAddrBits to 48. Otherwise, it would be 60 in order to handle mmap addresses (in range 0x0a00000000000000 - 0x0afffffffffffff). But in this case, the memory reserved in (s *pageAlloc).init for chunks is causing important slowdowns. On other platforms, the user address space is contiguous and starts at 0, so no offset is necessary.

const arenaBaseOffset = 0xffff800000000000*goarch.IsAmd64 + 0x0a00000000000000*goos.IsAix

arenaBaseOffsetUintptr const #

A typed version of this constant that will make it into DWARF (for viewcore).

const arenaBaseOffsetUintptr = uintptr(arenaBaseOffset)

arenaBits const #

arenaBits is the total bits in a combined arena map index. This is split between the index into the L1 arena map and the L2 arena map.

const arenaBits = arenaL1Bits + arenaL2Bits

arenaL1Bits const #

arenaL1Bits is the number of bits of the arena number covered by the first level arena map. This number should be small, since the first level arena map requires PtrSize*(1<

const arenaL1Bits = *ast.BinaryExpr

arenaL1Shift const #

arenaL1Shift is the number of bits to shift an arena frame number by to compute an index into the first level arena map.

const arenaL1Shift = arenaL2Bits

arenaL2Bits const #

arenaL2Bits is the number of bits of the arena number covered by the second level arena index. The size of each arena map allocation is proportional to 1<

const arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits

argc var #

var argc int32

argslice var #

var argslice []string

argv var #

var argv **byte

arm64HasATOMICS var #

var arm64HasATOMICS bool

arm64UseAlignedLoads var #

var arm64UseAlignedLoads bool

armHasVFPv4 var #

var armHasVFPv4 bool

asanenabled const #

const asanenabled = false

asanenabled const #

Private interface for the runtime.

const asanenabled = true

asmstdcallAddr var #

var asmstdcallAddr unsafe.Pointer

asmsyscall6 var #

asmsyscall6 calls the libc symbol using a C convention. It's defined in sys_aix_ppc64.s.

var asmsyscall6 libFunc

asmsysvicall6x var #

go:linkname asmsysvicall6x runtime.asmsysvicall6

var asmsysvicall6x libcFunc

asyncPreemptStack var #

asyncPreemptStack is the bytes of stack space required to inject an asyncPreempt call.

var asyncPreemptStack = *ast.UnaryExpr

auxv var #

auxv is populated on relevant platforms but defined here for all platforms so x/sys/cpu can assume the getAuxv symbol exists without keeping its list of auxv-using GOOS build tags in sync. It contains an even number of elements, (tag, value) pairs.

var auxv []uintptr

auxvreadbuf var #

var auxvreadbuf [128]uintptr

avxSupported const #

avxSupported indicates that the CPU supports AVX instructions.

const avxSupported = *ast.BinaryExpr

bbuckets var #

var bbuckets atomic.UnsafePointer

bcryptprimitivesdll var #

var bcryptprimitivesdll = [...]uint16{...}

bias32 const #

const bias32 = *ast.BinaryExpr

bias64 const #

const bias64 = *ast.BinaryExpr

binuptimeDummy var #

binuptimeDummy is used in binuptime as the address of an atomic.Load, to simulate an atomic_thread_fence_acq() call which behaves as an instruction reordering and memory barrier.

var binuptimeDummy uint32

bloc var #

var bloc uintptr

blocMax var #

var blocMax uintptr

blockProfile const #

const blockProfile

blockprofilerate var #

var blockprofilerate uint64

boringCaches var #

var boringCaches []unsafe.Pointer

boundsConvert const #

const boundsConvert

boundsErrorFmts var #

boundsErrorFmts provide error text for various out-of-bounds panics. Note: if you change these strings, you should adjust the size of the buffer in boundsError.Error below as well.

var boundsErrorFmts = [...]string{...}

boundsIndex const #

const boundsIndex boundsErrorCode = iota

boundsNegErrorFmts var #

boundsNegErrorFmts are overriding formats if x is negative. In this case there's no need to report y.

var boundsNegErrorFmts = [...]string{...}

boundsSlice3Acap const #

const boundsSlice3Acap

boundsSlice3Alen const #

const boundsSlice3Alen

boundsSlice3B const #

const boundsSlice3B

boundsSlice3C const #

const boundsSlice3C

boundsSliceAcap const #

const boundsSliceAcap

boundsSliceAlen const #

const boundsSliceAlen

boundsSliceB const #

const boundsSliceB

buckHashSize const #

size of bucket hash table

const buckHashSize = 179999

bucketCntBits const #

Maximum number of key/elem pairs a bucket can hold.

const bucketCntBits = abi.OldMapBucketCountBits

buckhash var #

var buckhash atomic.UnsafePointer

buf var #

var buf [bufSize]byte

bufSize const #

buffer of pending write data

const bufSize = 4096

buildVersion var #

buildVersion is the Go tree's version string at build time. If any GOEXPERIMENTs are set to non-default values, it will include "X:". This is set by the linker. This is accessed by "go version &lt;binary&gt;".

var buildVersion string

c0 const #

const c0 = *ast.CallExpr

c1 const #

const c1 = *ast.CallExpr

callbackFirstVCH const #

const callbackFirstVCH

callbackLastVCH const #

const callbackLastVCH

callbackMaxFrame const #

const callbackMaxFrame = *ast.BinaryExpr

callbackVEH const #

const callbackVEH = iota

canCreateFile const #

const canCreateFile = false

canCreateFile const #

const canCreateFile = true

canUseLongPaths var #

go:linkname canUseLongPaths internal/syscall/windows.CanUseLongPaths

var canUseLongPaths bool

capacityPerProc const #

capacityPerProc is the limiter's bucket capacity for each P in GOMAXPROCS.

const capacityPerProc = 1e9

casgstatusAlwaysTrack var #

casgstatusAlwaysTrack is a debug flag that causes casgstatus to always track various latencies on every transition instead of sampling them.

var casgstatusAlwaysTrack = false

cb_max const #

const cb_max = 2000

cbs var #

cbs stores all registered Go callbacks.

var cbs struct{...}

cgoAlwaysFalse var #

cgoAlwaysFalse is a boolean value that is always false. The cgo-generated code says if cgoAlwaysFalse { cgoUse(p) }, or if cgoAlwaysFalse { cgoKeepAlive(p) }. The compiler cannot see that cgoAlwaysFalse is always false, so it emits the test and keeps the call, giving the desired escape/alive analysis result. The test is cheaper than the call.

var cgoAlwaysFalse bool

cgoCheckPointerFail const #

const cgoCheckPointerFail = "cgo argument has Go pointer to unpinned Go pointer"

cgoContext var #

var cgoContext unsafe.Pointer

cgoHasExtraM var #

cgoHasExtraM is set on startup when an extra M is created for cgo. The extra M must be created before any C/C++ code calls cgocallback.

var cgoHasExtraM bool

cgoResultFail const #

const cgoResultFail = "cgo result is unpinned Go pointer or points to unpinned Go pointer"

cgoSymbolizer var #

var cgoSymbolizer unsafe.Pointer

cgoThreadStart var #

When running with cgo, we call _cgo_thread_start to start threads for us so that we can play nicely with foreign code.

var cgoThreadStart unsafe.Pointer

cgoTraceback var #

var cgoTraceback unsafe.Pointer

cgoWriteBarrierFail const #

const cgoWriteBarrierFail = "unpinned Go pointer stored into non-Go memory"

cgo_yield var #

var cgo_yield = *ast.UnaryExpr

chanrecvpc var #

var chanrecvpc = *ast.CallExpr

chansendpc var #

var chansendpc = *ast.CallExpr

class_to_allocnpages var #

var class_to_allocnpages = [_NumSizeClasses]uint8{...}

class_to_divmagic var #

var class_to_divmagic = [_NumSizeClasses]uint32{...}

class_to_size var #

var class_to_size = [_NumSizeClasses]uint16{...}

clobberdeadPtr const #

clobberdeadPtr is a special value that is used by the compiler to clobber dead stack slots, when -clobberdead flag is set.

const clobberdeadPtr = *ast.CallExpr

clockMonotonic const #

const clockMonotonic clockid = 1

clockRealtime const #

const clockRealtime clockid = 0

cloneFlags const #

Clone, the Linux rfork.

const cloneFlags = *ast.BinaryExpr

concurrentSweep const #

concurrentSweep is a debug flag. Disabling this flag ensures all spans are swept while the world is stopped.

const concurrentSweep = true

controlWord64 var #

Floating point control word values. Bits 0-5 are bits to disable floating-point exceptions. Bits 8-9 are the precision control: 0 = single precision a.k.a. float32 2 = double precision a.k.a. float64 Bits 10-11 are the rounding mode: 0 = round to nearest (even on a tie) 3 = round toward zero

var controlWord64 uint16 = *ast.BinaryExpr

controlWord64trunc var #

Floating point control word values. Bits 0-5 are bits to disable floating-point exceptions. Bits 8-9 are the precision control: 0 = single precision a.k.a. float32 2 = double precision a.k.a. float64 Bits 10-11 are the rounding mode: 0 = round to nearest (even on a tie) 3 = round toward zero

var controlWord64trunc uint16 = *ast.BinaryExpr

cpuStatsDep const #

const cpuStatsDep

cpuprof var #

var cpuprof cpuProfile

crashFD var #

crashFD is an optional file descriptor to use for fatal panics, as set by debug.SetCrashOutput (see #42888). If it is a valid fd (not all ones), writeErr and related functions write to it in addition to standard error. Initialized to -1 in schedinit.

var crashFD atomic.Uintptr

crashStackImplemented const #

Disable crash stack on Windows for now. Apparently, throwing an exception on a non-system-allocated crash stack causes EXCEPTION_STACK_OVERFLOW and hangs the process (see issue 63938).

const crashStackImplemented = *ast.BinaryExpr

crashing var #

crashing is the number of m's we have waited for when implementing GOTRACEBACK=crash when a signal is received.

var crashing atomic.Int32

crashingG var #

var crashingG *ast.IndexExpr

currentProcess const #

const currentProcess = *ast.UnaryExpr

currentThread const #

const currentThread = *ast.UnaryExpr

dash var #

var dash = [...]byte{...}

dataOffset const #

data offset should be the size of the bmap struct, but needs to be aligned correctly. For amd64p32 this means 64-bit alignment even though pointers are 32 bit.

const dataOffset = *ast.CallExpr

dbgvars var #

var dbgvars = []*dbgVar{...}

deadlock var #

var deadlock mutex

debug var #

Holds variables parsed from GODEBUG env var, except for "memprofilerate" since there is an existing int var for that value, which may already have an initial value.

var debug struct{...}

debugCallRuntime const #

const debugCallRuntime = "call from within the Go runtime"

debugCallSystemStack const #

const debugCallSystemStack = "executing on Go runtime stack"

debugCallUnknownFunc const #

const debugCallUnknownFunc = "call from unknown function"

debugCallUnsafePoint const #

const debugCallUnsafePoint = "call not at safe point"

debugChan const #

const debugChan = false

debugCheckBP const #

check the BP links during traceback.

const debugCheckBP = false

debugLogBoolFalse const #

const debugLogBoolFalse

debugLogBoolTrue const #

const debugLogBoolTrue

debugLogBytes const #

debugLogBytes is the size of each per-M ring buffer. This is allocated off-heap to avoid blowing up the M and hence the GC'd heap size.

const debugLogBytes = *ast.BinaryExpr

debugLogConstString const #

const debugLogConstString

debugLogHeaderSize const #

debugLogHeaderSize is the number of bytes in the framing header of every dlog record.

const debugLogHeaderSize = 2

debugLogHex const #

const debugLogHex

debugLogInt const #

const debugLogInt

debugLogPC const #

const debugLogPC

debugLogPtr const #

const debugLogPtr

debugLogString const #

const debugLogString

debugLogStringLimit const #

debugLogStringLimit is the maximum number of bytes in a string. Above this, the string will be truncated with "..(n more bytes).."

const debugLogStringLimit = *ast.BinaryExpr

debugLogStringOverflow const #

const debugLogStringOverflow

debugLogSyncSize const #

debugLogSyncSize is the number of bytes in a sync record.

const debugLogSyncSize = *ast.BinaryExpr

debugLogTraceback const #

const debugLogTraceback

debugLogUint const #

const debugLogUint

debugLogUnknown const #

const debugLogUnknown = *ast.BinaryExpr

debugPcln const #

const debugPcln = false

debugPinnerKeepUnpin var #

debugPinnerKeepUnpin is used to make runtime.(*Pinner).Unpin reachable.

var debugPinnerKeepUnpin bool = false

debugPtrmask var #

var debugPtrmask struct{...}

debugScanConservative const #

debugScanConservative enables debug logging for stack frames that are scanned conservatively.

const debugScanConservative = false

debugSelect const #

const debugSelect = false

debugTraceReentrancy const #

debugTraceReentrancy checks if the trace is reentrant. This is optional because throwing in a function makes it instantly not inlineable, and we want traceAcquire to be inlineable for low overhead when the trace is disabled.

const debugTraceReentrancy = false

debuglock var #

var debuglock mutex

defaultGOROOT var #

var defaultGOROOT string

defaultHeapMinimum const #

defaultHeapMinimum is the value of heapMinimum for GOGC==100.

const defaultHeapMinimum = *ast.BinaryExpr

defaultTraceAdvancePeriod const #

traceAdvancePeriod is the approximate period between new generations.

const defaultTraceAdvancePeriod = 1e9

devswap var #

var devswap = *ast.CallExpr

didothers var #

var didothers bool

dirBufSize const #

size of buffer to read from a directory

const dirBufSize = 4096

disableMemoryProfiling var #

disableMemoryProfiling is set by the linker if memory profiling is not used and the link type guarantees nobody else could use it elsewhere. We check if the runtime.memProfileInternal symbol is present.

var disableMemoryProfiling bool

disableSigChan var #

channels for synchronizing signal mask updates with the signal mask thread

var disableSigChan chan uint32

disarmed const #

const disarmed = 0xFFFF

divideError var #

var divideError = *ast.CallExpr

dlogEnabled const #

const dlogEnabled = false

dlogEnabled const #

const dlogEnabled = true

doubleCheckHeapSetType const #

const doubleCheckHeapSetType = doubleCheckMalloc

doubleCheckMalloc const #

doubleCheckMalloc enables a bunch of extra checks to malloc to double-check that various invariants are upheld. We might consider turning these on by default; many of them previously were. They account for a few % of mallocgc's cost though, which does matter somewhat at scale.

const doubleCheckMalloc = false

doubleCheckReadMemStats var #

doubleCheckReadMemStats controls a double-check mode for ReadMemStats that ensures consistency between the values that ReadMemStats is using and the runtime-internal stats.

var doubleCheckReadMemStats = false

drainCheckThreshold const #

drainCheckThreshold specifies how many units of work to do between self-preemption checks in gcDrain. Assuming a scan rate of 1 MB/ms, this is ~100 µs. Lower values have higher overhead in the scan loop (the scheduler check may perform a syscall, so its overhead is nontrivial). Higher values make the system less responsive to incoming work.

const drainCheckThreshold = 100000

dumpfd var #

var dumpfd uintptr

dumphdr var #

var dumphdr = *ast.CallExpr

emptyInterfaceSwitchCache var #

Empty interface switch cache. Contains one entry with a nil Typ (which causes a cache lookup to fail immediately.)

var emptyInterfaceSwitchCache = abi.InterfaceSwitchCache{...}

emptyOne const #

const emptyOne = 1

emptyRest const #

Possible tophash values. We reserve a few possibilities for special marks. Each bucket (including its overflow buckets, if any) will have either all or none of its entries in the evacuated* states (except during the evacuate() method, which only happens during map writes and thus no one else can observe the map during that time).

const emptyRest = 0

emptyTypeAssertCache var #

Empty type assert cache. Contains one entry with a nil Typ (which causes a cache lookup to fail immediately.)

var emptyTypeAssertCache = abi.TypeAssertCache{...}

emptymspan var #

dummy mspan that contains no free objects.

var emptymspan mspan

emptystatus var #

var emptystatus = *ast.CallExpr

enableSigChan var #

channels for synchronizing signal mask updates with the signal mask thread

var enableSigChan chan uint32

envBufSize const #

size of buffer to read an environment variable (may grow)

const envBufSize = 128

envDir const #

Plan 9 environment device

const envDir = "/env/"

envs var #

var envs []string

epfd var #

var epfd int32 = *ast.UnaryExpr

evacuatedEmpty const #

const evacuatedEmpty = 4

evacuatedX const #

const evacuatedX = 2

evacuatedY const #

const evacuatedY = 3

eventHandler var #

eventHandler retrieves and executes handlers for pending JavaScript events. It returns true if an event was handled.

var eventHandler func() bool

events var #

events is a stack of calls from JavaScript into Go.

var events []*event

eventtypeClock const #

const eventtypeClock eventtype = iota

eventtypeFdRead const #

const eventtypeFdRead

eventtypeFdWrite const #

const eventtypeFdWrite

evts var #

var evts []event

execLock var #

execLock serializes exec and clone to avoid bugs or unspecified behaviour around exec'ing while creating/destroying threads. See issue #19546.

var execLock rwmutex

executablePath var #

go:linkname executablePath os.executablePath

var executablePath string

executablePath var #

go:linkname executablePath os.executablePath

var executablePath string

exiting var #

var exiting uint32

exiting var #

exiting is set to non-zero when the process is exiting.

var exiting uint32

expbits32 const #

const expbits32 uint = 8

expbits64 const #

const expbits64 uint = 11

extraM var #

Locking linked list of extra M's, via mp.schedlink. Must be accessed only via lockextra/unlockextra. Can't be atomic.Pointer[m] because we use an invalid pointer as a "locked" sentinel value. M's on this list remain visible to the GC because their mp.curg is on allgs.

var extraM atomic.Uintptr

extraMInUse var #

Number of extra M's in use by threads.

var extraMInUse atomic.Uint32

extraMLength var #

Number of M's in the extraM list.

var extraMLength atomic.Uint32

extraMWaiters var #

Number of waiters in lockextra.

var extraMWaiters atomic.Uint32

fInf const #

const fInf = 0x7FF0000000000000

fNegInf const #

const fNegInf = 0xFFF0000000000000

failallocatestack const #

These errors are reported (via writeErrStr) by some OS-specific versions of newosproc and newosproc0.

const failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"

failthreadcreate const #

These errors are reported (via writeErrStr) by some OS-specific versions of newosproc and newosproc0.

const failthreadcreate = "runtime: failed to create new OS thread\n"

faketime var #

faketime is the simulated time in nanoseconds since 1970 for the playground.

var faketime int64 = 1257894000000000000

faketime var #

faketime is the simulated time in nanoseconds since 1970 for the playground. Zero means not to use faketime.

var faketime int64

faketimeState var #

var faketimeState struct{...}

fastlog2Table var #

var fastlog2Table = [*ast.BinaryExpr]float64{...}

fastlogNumBits const #

const fastlogNumBits = 5

fdReadwriteHangup const #

const fdReadwriteHangup eventrwflags = *ast.BinaryExpr

fieldKindEface const #

const fieldKindEface = 3

fieldKindEol const #

const fieldKindEol = 0

fieldKindIface const #

const fieldKindIface = 2

fieldKindPtr const #

const fieldKindPtr = 1

finalizer1 var #

var finalizer1 = [...]byte{...}

finc var #

This runs during the GC sweep phase. Heap memory can't be allocated while sweep is running.

var finc *finblock

fing var #

This runs during the GC sweep phase. Heap memory can't be allocated while sweep is running.

var fing *g

fingCreated const #

finalizer goroutine status.

const fingCreated uint32 = *ast.BinaryExpr

fingRunningFinalizer const #

finalizer goroutine status.

const fingRunningFinalizer

fingStatus var #

var fingStatus atomic.Uint32

fingUninitialized const #

finalizer goroutine status.

const fingUninitialized uint32 = iota

fingWait const #

finalizer goroutine status.

const fingWait

fingWake const #

finalizer goroutine status.

const fingWake

finlock var #

This runs during the GC sweep phase. Heap memory can't be allocated while sweep is running.

var finlock mutex

finptrmask var #

This runs during the GC sweep phase. Heap memory can't be allocated while sweep is running.

var finptrmask [*ast.BinaryExpr]byte

finq var #

This runs during the GC sweep phase. Heap memory can't be allocated while sweep is running.

var finq *finblock

firstmoduledata var #

var firstmoduledata moduledata

fixedRootCount const #

const fixedRootCount

fixedRootFinalizers const #

const fixedRootFinalizers = iota

fixedRootFreeGStacks const #

const fixedRootFreeGStacks

fixedStack const #

const fixedStack = *ast.BinaryExpr

fixedStack0 const #

The minimum stack size to allocate. The hackery here rounds fixedStack0 up to a power of 2.

const fixedStack0 = *ast.BinaryExpr

fixedStack1 const #

const fixedStack1 = *ast.BinaryExpr

fixedStack2 const #

const fixedStack2 = *ast.BinaryExpr

fixedStack3 const #

const fixedStack3 = *ast.BinaryExpr

fixedStack4 const #

const fixedStack4 = *ast.BinaryExpr

fixedStack5 const #

const fixedStack5 = *ast.BinaryExpr

fixedStack6 const #

const fixedStack6 = *ast.BinaryExpr

floatError var #

var floatError = *ast.CallExpr

forcePreemptNS const #

forcePreemptNS is the time slice given to a G before it is preempted.

const forcePreemptNS = *ast.BinaryExpr

forcegc var #

var forcegc forcegcstate

forcegcperiod var #

forcegcperiod is the maximum time in nanoseconds between garbage collections. If we go this long without a garbage collection, one is forced to run. This is a variable for testing purposes. It normally doesn't change.

var forcegcperiod int64 = *ast.BinaryExpr

framepointer_enabled const #

Must agree with internal/buildcfg.FramePointerEnabled.

const framepointer_enabled = *ast.BinaryExpr

freeChunkSum const #

const freeChunkSum = *ast.CallExpr

freeMRef const #

Values for m.freeWait.

const freeMRef = 1

freeMStack const #

Values for m.freeWait.

const freeMStack = 0

freeMWait const #

Values for m.freeWait.

const freeMWait = 2

freemark var #

Bit vector of free marks. Needs to be as big as the largest number of objects per span.

var freemark [*ast.BinaryExpr]bool

freezeStopWait const #

freezeStopWait is a large value that freezetheworld sets sched.stopwait to in order to request that all Gs permanently stop.

const freezeStopWait = 0x7fffffff

freezing var #

freezing is set to non-zero if the runtime is trying to freeze the world.

var freezing atomic.Bool

fwdSig var #

Stores the signal handlers registered before Go installed its own. These signal handlers will be invoked in cases where Go doesn't want to handle a particular signal (e.g., signal occurred on a non-Go thread). See sigfwdgo for more information on when the signals are forwarded. This is read by the signal handler; accesses should use atomic.Loaduintptr and atomic.Storeuintptr.

var fwdSig [_NSIG]uintptr

g0 var #

var g0 g

gStatusStrings var #

var gStatusStrings = [...]string{...}

gTrackingPeriod const #

gTrackingPeriod is the number of transitions out of _Grunning between latency tracking runs.

const gTrackingPeriod = 8

gcAssistTimeSlack const #

gcAssistTimeSlack is the nanoseconds of mutator assist time that can accumulate on a P before updating gcController.assistTime.

const gcAssistTimeSlack = 5000

gcBackgroundMode const #

const gcBackgroundMode gcMode = iota

gcBackgroundUtilization const #

gcBackgroundUtilization is the fixed CPU utilization for background marking. It must be <= gcGoalUtilization. The difference between gcGoalUtilization and gcBackgroundUtilization will be made up by mark assists. The scheduler will aim to use within 50% of this goal. As a general rule, there's little reason to set gcBackgroundUtilization < gcGoalUtilization. One reason might be in mostly idle applications, where goroutines are unlikely to assist at all, so the actual utilization will be lower than the goal. But this is a moot point because the idle mark workers already soak up idle CPU resources. These two values are still kept separate however because they are distinct conceptually, and in previous iterations of the pacer the distinction was more important.

const gcBackgroundUtilization = 0.25

gcBgMarkWorkerCount var #

Total number of gcBgMarkWorker goroutines. Protected by worldsema.

var gcBgMarkWorkerCount int32

gcBgMarkWorkerPool var #

Pool of GC parked background workers. Entries are type *gcBgMarkWorkerNode.

var gcBgMarkWorkerPool lfstack

gcBitsArenas var #

var gcBitsArenas struct{...}

gcBitsChunkBytes const #

const gcBitsChunkBytes = *ast.CallExpr

gcBitsHeaderBytes const #

const gcBitsHeaderBytes = *ast.CallExpr

gcBlackenEnabled var #

gcBlackenEnabled is 1 if mutator assists and background mark workers are allowed to blacken objects. This must only be set when gcphase == _GCmark.

var gcBlackenEnabled uint32

gcCPULimiter var #

gcCPULimiter is a mechanism to limit GC CPU utilization in situations where it might become excessive and inhibit application progress (e.g. a death spiral). The core of the limiter is a leaky bucket mechanism that fills with GC CPU time and drains with mutator time. Because the bucket fills and drains with time directly (i.e. without any weighting), this effectively sets a very conservative limit of 50%. This limit could be enforced directly, however, but the purpose of the bucket is to accommodate spikes in GC CPU utilization without hurting throughput. Note that the bucket in the leaky bucket mechanism can never go negative, so the GC never gets credit for a lot of CPU time spent without the GC running. This is intentional, as an application that stays idle for, say, an entire day, could build up enough credit to fail to prevent a death spiral the following day. The bucket's capacity is the GC's only leeway. The capacity thus also sets the window the limiter considers. For example, if the capacity of the bucket is 1 cpu-second, then the limiter will not kick in until at least 1 full cpu-second in the last 2 cpu-second window is spent on GC CPU time.

var gcCPULimiter gcCPULimiterState

gcCPULimiterUpdatePeriod const #

gcCPULimiterUpdatePeriod dictates the maximum amount of wall-clock time we can go before updating the limiter.

const gcCPULimiterUpdatePeriod = 10e6

gcController var #

gcController implements the GC pacing controller that determines when to trigger concurrent garbage collection and how much marking work to do in mutator assists and background marking. It calculates the ratio between the allocation rate (in terms of CPU time) and the GC scan throughput to determine the heap size at which to trigger a GC cycle such that no GC assists are required to finish on time. This algorithm thus optimizes GC CPU utilization to the dedicated background mark utilization of 25% of GOMAXPROCS by minimizing GC assists. GOMAXPROCS. The high-level design of this algorithm is documented at https://github.com/golang/proposal/blob/master/design/44167-gc-pacer-redesign.md. See https://golang.org/s/go15gcpacing for additional historical context.

var gcController gcControllerState

gcCreditSlack const #

gcCreditSlack is the amount of scan work credit that can accumulate locally before updating gcController.heapScanWork and, optionally, gcController.bgScanCredit. Lower values give a more accurate assist ratio and make it more likely that assists will successfully steal background credit. Higher values reduce memory contention.

const gcCreditSlack = 2000

gcDebugMarkDone var #

gcDebugMarkDone contains fields used to debug/test mark termination.

var gcDebugMarkDone struct{...}

gcDrainFlushBgCredit const #

const gcDrainFlushBgCredit

gcDrainFractional const #

const gcDrainFractional

gcDrainIdle const #

const gcDrainIdle

gcDrainUntilPreempt const #

const gcDrainUntilPreempt gcDrainFlags = *ast.BinaryExpr

gcForceBlockMode const #

const gcForceBlockMode

gcForceMode const #

const gcForceMode

gcGoalUtilization const #

gcGoalUtilization is the goal CPU utilization for marking as a fraction of GOMAXPROCS. Increasing the goal utilization will shorten GC cycles as the GC has more resources behind it, lessening costs from the write barrier, but comes at the cost of increasing mutator latency.

const gcGoalUtilization = gcBackgroundUtilization

gcMarkDoneFlushed var #

gcMarkDoneFlushed counts the number of P's with flushed work. Ideally this would be a captured local in gcMarkDone, but forEachP escapes its callback closure, so it can't capture anything. This is protected by markDoneSema.

var gcMarkDoneFlushed uint32

gcMarkWorkerDedicatedMode const #

gcMarkWorkerDedicatedMode indicates that the P of a mark worker is dedicated to running that mark worker. The mark worker should run without preemption.

const gcMarkWorkerDedicatedMode

gcMarkWorkerFractionalMode const #

gcMarkWorkerFractionalMode indicates that a P is currently running the "fractional" mark worker. The fractional worker is necessary when GOMAXPROCS*gcBackgroundUtilization is not an integer and using only dedicated workers would result in utilization too far from the target of gcBackgroundUtilization. The fractional worker should run until it is preempted and will be scheduled to pick up the fractional part of GOMAXPROCS*gcBackgroundUtilization.

const gcMarkWorkerFractionalMode

gcMarkWorkerIdleMode const #

gcMarkWorkerIdleMode indicates that a P is running the mark worker because it has nothing else to do. The idle worker should run until it is preempted and account its time against gcController.idleMarkTime.

const gcMarkWorkerIdleMode

gcMarkWorkerModeStrings var #

gcMarkWorkerModeStrings are the strings labels of gcMarkWorkerModes to use in execution traces.

var gcMarkWorkerModeStrings = [...]string{...}

gcMarkWorkerNotWorker const #

gcMarkWorkerNotWorker indicates that the next scheduled G is not starting work and the mode should be ignored.

const gcMarkWorkerNotWorker gcMarkWorkerMode = iota

gcOverAssistWork const #

gcOverAssistWork determines how many extra units of scan work a GC assist does when an assist happens. This amortizes the cost of an assist by pre-paying for this many bytes of future allocations.

const gcOverAssistWork = *ast.BinaryExpr

gcStatsDep const #

const gcStatsDep

gcTriggerCycle const #

gcTriggerCycle indicates that a cycle should be started if we have not yet started cycle number gcTrigger.n (relative to work.cycles).

const gcTriggerCycle

gcTriggerHeap const #

gcTriggerHeap indicates that a cycle should be started when the heap size reaches the trigger heap size computed by the controller.

const gcTriggerHeap gcTriggerKind = iota

gcTriggerTime const #

gcTriggerTime indicates that a cycle should be started when it's been more than forcegcperiod nanoseconds since the previous GC cycle.

const gcTriggerTime

gcphase var #

Garbage collector phase. Indicates to write barrier and synchronization task to perform.

var gcphase uint32

gcrash var #

gcrash is a fake g that can be used when crashing due to bad stack conditions.

var gcrash g

gcsema var #

Holding gcsema grants the M the right to block a GC, and blocks until the current GC is done. In particular, it prevents gomaxprocs from changing concurrently. TODO(mknyszek): Once gomaxprocs and the execution tracer can handle being changed/enabled during a GC, remove this.

var gcsema uint32 = 1

globalAlloc var #

var globalAlloc struct{...}

globalRand var #

globalRand holds the global random state. It is only used at startup and for creating new m's. Otherwise the per-m random state should be used by calling goodrand.

var globalRand struct{...}

goarm var #

set by cmd/link on arm systems accessed using linkname by internal/runtime/atomic. goarm should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/creativeprojects/go-selfupdate Do not remove or change the type signature. See go.dev/issue/67401. go:linkname goarm

var goarm uint8

goarmsoftfp var #

set by cmd/link on arm systems accessed using linkname by internal/runtime/atomic. goarm should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/creativeprojects/go-selfupdate Do not remove or change the type signature. See go.dev/issue/67401. go:linkname goarm

var goarmsoftfp uint8

godebugDefault var #

var godebugDefault string

godebugEnv var #

var godebugEnv *ast.IndexExpr

godebugNewIncNonDefault var #

var godebugNewIncNonDefault *ast.IndexExpr

godebugUpdate var #

var godebugUpdate *ast.IndexExpr

goexits var #

var goexits = *ast.CallExpr

gomaxprocs var #

var gomaxprocs int32

goroutineProfile var #

var goroutineProfile = struct{...}{...}

goroutineProfileAbsent const #

const goroutineProfileAbsent goroutineProfileState = iota

goroutineProfileInProgress const #

const goroutineProfileInProgress

goroutineProfileSatisfied const #

const goroutineProfileSatisfied

handlingSig var #

handlingSig is indexed by signal number and is non-zero if we are currently handling the signal. Or, to put it another way, whether the signal handler is currently set to the Go signal handler or not. This is uint32 rather than bool so that we can use atomic instructions.

var handlingSig [_NSIG]uint32

hashLoad const #

exported value for testing

const hashLoad = *ast.BinaryExpr

hashRandomBytes const #

const hashRandomBytes = *ast.BinaryExpr

hashWriting const #

const hashWriting = 4

hashkey var #

used in hash{32,64}.go to seed the hash function

var hashkey [4]uintptr

haveHighResSleep var #

haveHighResSleep indicates that NtCreateWaitCompletionPacket exists and haveHighResTimer is true. NtCreateWaitCompletionPacket has been available since Windows 10, but has just been publicly documented, so some platforms, like Wine, don't support it yet.

var haveHighResSleep = false

haveHighResSleep var #

var haveHighResSleep = true

haveHighResTimer var #

haveHighResTimer indicates that the CreateWaitableTimerEx CREATE_WAITABLE_TIMER_HIGH_RESOLUTION flag is available.

var haveHighResTimer = false

haveSysmon const #

haveSysmon indicates whether there is sysmon thread support. No threads on wasm yet, so no sysmon.

const haveSysmon = *ast.BinaryExpr

hchanSize const #

const hchanSize = *ast.BinaryExpr

heapAddrBits const #

heapAddrBits is the number of bits in a heap address. On amd64, addresses are sign-extended beyond heapAddrBits. On other arches, they are zero-extended. On most 64-bit platforms, we limit this to 48 bits based on a combination of hardware and OS limitations. amd64 hardware limits addresses to 48 bits, sign-extended to 64 bits. Addresses where the top 16 bits are not either all 0 or all 1 are "non-canonical" and invalid. Because of these "negative" addresses, we offset addresses by 1<<47 (arenaBaseOffset) on amd64 before computing indexes into the heap arenas index. In 2017, amd64 hardware added support for 57 bit addresses; however, currently only Linux supports this extension and the kernel will never choose an address above 1<<47 unless mmap is called with a hint address above 1<<47 (which we never do). arm64 hardware (as of ARMv8) limits user addresses to 48 bits, in the range [0, 1<<48). ppc64, mips64, and s390x support arbitrary 64 bit addresses in hardware. On Linux, Go leans on stricter OS limits. Based on Linux's processor.h, the user address space is limited as follows on 64-bit architectures: Architecture Name Maximum Value (exclusive) --------------------------------------------------------------------- amd64 TASK_SIZE_MAX 0x007ffffffff000 (47 bit addresses) arm64 TASK_SIZE_64 0x01000000000000 (48 bit addresses) ppc64{,le} TASK_SIZE_USER64 0x00400000000000 (46 bit addresses) mips64{,le} TASK_SIZE64 0x00010000000000 (40 bit addresses) s390x TASK_SIZE 1<<64 (64 bit addresses) These limits may increase over time, but are currently at most 48 bits except on s390x. On all architectures, Linux starts placing mmap'd regions at addresses that are significantly below 48 bits, so even if it's possible to exceed Go's 48 bit limit, it's extremely unlikely in practice. On 32-bit platforms, we accept the full 32-bit address space because doing so is cheap. mips32 only has access to the low 2GB of virtual memory, so we further limit it to 31 bits. 
On ios/arm64, although 64-bit pointers are presumably available, pointers are truncated to 33 bits in iOS <14. Furthermore, only the top 4 GiB of the address space are actually available to the application. In iOS >=14, more of the address space is available, and the OS can now provide addresses outside of those 33 bits. Pick 40 bits as a reasonable balance between address space usage by the page allocator, and flexibility for what mmap'd regions we'll accept for the heap. We can't just move to the full 48 bits because this uses too much address space for older iOS versions. TODO(mknyszek): Once iOS <14 is deprecated, promote ios/arm64 to a 48-bit address space like every other arm64 platform. WebAssembly currently has a limit of 4GB linear memory.

const heapAddrBits = *ast.BinaryExpr

heapArenaBitmapWords const #

heapArenaBitmapWords is the size of each heap arena's bitmap in uintptrs.

const heapArenaBitmapWords = *ast.BinaryExpr

heapArenaBytes const #

heapArenaBytes is the size of a heap arena. The heap consists of mappings of size heapArenaBytes, aligned to heapArenaBytes. The initial heap mapping is one arena. This is currently 64MB on 64-bit non-Windows and 4MB on 32-bit and on Windows. We use smaller arenas on Windows because all committed memory is charged to the process, even if it's not touched. Hence, for processes with small heaps, the mapped arena space needs to be commensurate. This is particularly important with the race detector, since it significantly amplifies the cost of committed memory.

const heapArenaBytes = *ast.BinaryExpr

heapArenaWords const #

const heapArenaWords = *ast.BinaryExpr

heapStatsDep const #

const heapStatsDep statDep = iota

hicb const #

const hicb = 0xBF

hpetDevMap var #

var hpetDevMap [_HPET_DEV_MAP_MAX]uintptr

hpetDevPath const #

const hpetDevPath = "/dev/hpetX\x00"

idleStart var #

var idleStart int64

idleTimeout var #

The timeout event started by beforeIdle.

var idleTimeout *timeoutEvent

idlepMask var #

Bitmask of Ps in _Pidle list, one bit per P. Reads and writes must be atomic. Length may change at safe points. Each P must update only its own bit. In order to maintain consistency, a P going idle must update the idle mask simultaneously with updates to the idle P list under the sched.lock, otherwise a racing pidleget may clear the mask before pidleput sets the mask, corrupting the bitmap. N.B., procresize takes ownership of all Ps in stopTheWorldWithSema.

var idlepMask pMask

inForkedChild var #

inForkedChild is true while manipulating signals in the child process. This is used to avoid calling libc functions in case we are using vfork.

var inForkedChild bool

inProgress var #

inProgress is a byte whose address is a sentinel indicating that some thread is currently building the GC bitmask for a type.

var inProgress byte

inf var #

var inf = *ast.CallExpr

inf32 const #

const inf32 uint32 = *ast.BinaryExpr

inf64 const #

const inf64 uint64 = *ast.BinaryExpr

initSigmask var #

Value to use for signal mask for newly created M's.

var initSigmask sigset

inittrace var #

inittrace stores statistics for init functions which are updated by malloc and newproc when active is true.

var inittrace tracestat

intArgRegs var #

intArgRegs is used by the various register assignment algorithm implementations in the runtime. These include: - Finalizers (mfinal.go) - Windows callbacks (syscall_windows.go) Both are stripped-down versions of the algorithm since they only have to deal with a subset of cases (finalizers only take a pointer or interface argument, Go Windows callbacks don't support floating point). It should be modified with care and is generally only modified when testing this package. It should never be set higher than its internal/abi constant counterparts, because the system relies on a structure that is at least large enough to hold the registers the system supports. Protected by finlock.

var intArgRegs = abi.IntArgRegs

iocphandle var #

var iocphandle uintptr = _INVALID_HANDLE_VALUE

isIdleInSynctest var #

isIdleInSynctest indicates that a goroutine is considered idle by synctest.Wait.

var isIdleInSynctest = [*ast.CallExpr]bool{...}

isIntel var #

var isIntel bool

isSbrkPlatform const #

const isSbrkPlatform = true

isSbrkPlatform const #

const isSbrkPlatform = false

isWaitingForSuspendG var #

isWaitingForSuspendG indicates that a goroutine is only entering _Gwaiting and setting a waitReason because it needs to be able to let the suspendG (used by the GC and the execution tracer) take ownership of its stack. The G is always actually executing on the system stack in these cases. TODO(mknyszek): Consider replacing this with a new dedicated G status.

var isWaitingForSuspendG = [*ast.CallExpr]bool{...}

isarchive var #

Set by the linker so the runtime can determine the buildmode.

var isarchive bool

iscgo var #

iscgo is set to true by the runtime/cgo package. iscgo should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/ebitengine/purego Do not remove or change the type signature. See go.dev/issue/67401. go:linkname iscgo

var iscgo bool

islibrary var #

Set by the linker so the runtime can determine the buildmode.

var islibrary bool

itabInitSize const #

const itabInitSize = 512

itabLock var #

var itabLock mutex

itabTable var #

var itabTable = *ast.UnaryExpr

itabTableInit var #

var itabTableInit = itabTableType{...}

iterator const #

flags

const iterator = 1

kq var #

var kq int32 = *ast.UnaryExpr

kqIdent const #

Magic number of identifier used for EVFILT_USER. This number had zero Google results when it was created. That way, people will be directed here when this number gets printed somehow and they search for it.

const kqIdent = 0xee1eb9f4

labelSync var #

var labelSync uintptr

largeSizeDiv const #

const largeSizeDiv = 128

lastmoduledatap var #

lastmoduledatap should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic Do not remove or change the type signature. See go.dev/issue/67401. See go.dev/issue/71672. go:linkname lastmoduledatap

var lastmoduledatap *moduledata

legacy const #

const legacy

levelBits var #

levelBits is the number of bits in the radix for a given level in the super summary structure. The sum of all the entries of levelBits should equal heapAddrBits.

var levelBits = [summaryLevels]uint{...}

levelBits var #

See comment in mpagealloc_64bit.go.

var levelBits = [summaryLevels]uint{...}

levelLogPages var #

See comment in mpagealloc_64bit.go.

var levelLogPages = [summaryLevels]uint{...}

levelLogPages var #

levelLogPages is log2 the maximum number of runtime pages in the address space a summary in the given level represents. The leaf level always represents exactly log2 of 1 chunk's worth of pages.

var levelLogPages = [summaryLevels]uint{...}

levelShift var #

See comment in mpagealloc_64bit.go.

var levelShift = [summaryLevels]uint{...}

levelShift var #

levelShift is the number of bits to shift to acquire the radix for a given level in the super summary structure. With levelShift, one can compute the index of the summary at level l related to a pointer p by doing: p >> levelShift[l]

var levelShift = [summaryLevels]uint{...}

libc__Errno var #

libc

var libc__Errno libFunc

libc____errno var #

var libc____errno libcFunc

libc___mod_init var #

var libc___mod_init libFunc

libc___n_pthreads var #

var libc___n_pthreads libFunc

libc_chdir var #

var libc_chdir libcFunc

libc_chdir var #

var libc_chdir libFunc

libc_chroot var #

var libc_chroot libFunc

libc_chroot var #

var libc_chroot libcFunc

libc_clock_gettime var #

libc

var libc_clock_gettime libFunc

libc_clock_gettime var #

var libc_clock_gettime libcFunc

libc_close var #

var libc_close libcFunc

libc_close var #

libc

var libc_close libFunc

libc_dup2 var #

var libc_dup2 libFunc

libc_execve var #

var libc_execve libFunc

libc_execve var #

var libc_execve libcFunc

libc_exit var #

libc

var libc_exit libFunc

libc_exit var #

var libc_exit libcFunc

libc_fcntl var #

var libc_fcntl libFunc

libc_fcntl var #

var libc_fcntl libcFunc

libc_fork var #

var libc_fork libFunc

libc_forkx var #

var libc_forkx libcFunc

libc_getcontext var #

var libc_getcontext libcFunc

libc_getegid var #

libc

var libc_getegid libFunc

libc_geteuid var #

libc

var libc_geteuid libFunc

libc_getgid var #

libc

var libc_getgid libFunc

libc_gethostname var #

var libc_gethostname libcFunc

libc_getpid var #

libc

var libc_getpid libFunc

libc_getpid var #

var libc_getpid libcFunc

libc_getrctl var #

var libc_getrctl libcFunc

libc_getsystemcfg var #

libc

var libc_getsystemcfg libFunc

libc_getuid var #

libc

var libc_getuid libFunc

libc_ioctl var #

var libc_ioctl libFunc

libc_ioctl var #

var libc_ioctl libcFunc

libc_issetugid var #

var libc_issetugid libcFunc

libc_kill var #

libc

var libc_kill libFunc

libc_kill var #

var libc_kill libcFunc

libc_madvise var #

var libc_madvise libcFunc

libc_madvise var #

libc

var libc_madvise libFunc

libc_malloc var #

var libc_malloc libcFunc

libc_malloc var #

libc

var libc_malloc libFunc

libc_mmap var #

var libc_mmap libcFunc

libc_mmap var #

libc

var libc_mmap libFunc

libc_mprotect var #

libc

var libc_mprotect libFunc

libc_munmap var #

libc

var libc_munmap libFunc

libc_munmap var #

var libc_munmap libcFunc

libc_open var #

var libc_open libcFunc

libc_open var #

libc

var libc_open libFunc

libc_pipe var #

libc

var libc_pipe libFunc

libc_pipe2 var #

var libc_pipe2 libcFunc

libc_poll var #

var libc_poll libFunc

libc_port_alert var #

var libc_port_alert libcFunc

libc_port_associate var #

var libc_port_associate libcFunc

libc_port_create var #

var libc_port_create libcFunc

libc_port_dissociate var #

var libc_port_dissociate libcFunc

libc_port_getn var #

var libc_port_getn libcFunc

libc_pthread_attr_destroy var #

var libc_pthread_attr_destroy libcFunc

libc_pthread_attr_getstack var #

var libc_pthread_attr_getstack libcFunc

libc_pthread_attr_init var #

var libc_pthread_attr_init libcFunc

libc_pthread_attr_setdetachstate var #

var libc_pthread_attr_setdetachstate libcFunc

libc_pthread_attr_setstack var #

var libc_pthread_attr_setstack libcFunc

libc_pthread_create var #

var libc_pthread_create libcFunc

libc_pthread_kill var #

var libc_pthread_kill libcFunc

libc_pthread_self var #

var libc_pthread_self libcFunc

libc_raise var #

var libc_raise libcFunc

libc_raise var #

libc

var libc_raise libFunc

libc_rctlblk_get_local_action var #

var libc_rctlblk_get_local_action libcFunc

libc_rctlblk_get_local_flags var #

var libc_rctlblk_get_local_flags libcFunc

libc_rctlblk_get_value var #

var libc_rctlblk_get_value libcFunc

libc_rctlblk_size var #

var libc_rctlblk_size libcFunc

libc_read var #

libc

var libc_read libFunc

libc_read var #

var libc_read libcFunc

libc_sched_yield var #

libc

var libc_sched_yield libFunc

libc_sched_yield var #

var libc_sched_yield libcFunc

libc_select var #

var libc_select libcFunc

libc_sem_init var #

libc

var libc_sem_init libFunc

libc_sem_init var #

var libc_sem_init libcFunc

libc_sem_post var #

libc

var libc_sem_post libFunc

libc_sem_post var #

var libc_sem_post libcFunc

libc_sem_reltimedwait_np var #

var libc_sem_reltimedwait_np libcFunc

libc_sem_timedwait var #

libc

var libc_sem_timedwait libFunc

libc_sem_wait var #

libc

var libc_sem_wait libFunc

libc_sem_wait var #

var libc_sem_wait libcFunc

libc_setgid var #

var libc_setgid libcFunc

libc_setgid var #

var libc_setgid libFunc

libc_setgroups var #

var libc_setgroups libFunc

libc_setgroups var #

var libc_setgroups libcFunc

libc_setitimer var #

var libc_setitimer libcFunc

libc_setitimer var #

libc

var libc_setitimer libFunc

libc_setpgid var #

var libc_setpgid libcFunc

libc_setpgid var #

var libc_setpgid libFunc

libc_setrlimit var #

var libc_setrlimit libFunc

libc_setrlimit var #

var libc_setrlimit libcFunc

libc_setsid var #

var libc_setsid libcFunc

libc_setsid var #

var libc_setsid libFunc

libc_setuid var #

var libc_setuid libFunc

libc_setuid var #

var libc_setuid libcFunc

libc_sigaction var #

libc

var libc_sigaction libFunc

libc_sigaction var #

var libc_sigaction libcFunc

libc_sigaltstack var #

var libc_sigaltstack libcFunc

libc_sigaltstack var #

libc

var libc_sigaltstack libFunc

libc_sigprocmask var #

var libc_sigprocmask libcFunc

libc_syscall var #

var libc_syscall libcFunc

libc_sysconf var #

libc

var libc_sysconf libFunc

libc_sysconf var #

var libc_sysconf libcFunc

libc_usleep var #

libc

var libc_usleep libFunc

libc_usleep var #

var libc_usleep libcFunc

libc_wait4 var #

var libc_wait4 libcFunc

libc_write var #

libc

var libc_write libFunc

libc_write var #

var libc_write libcFunc

libpthread___pth_init var #

libc

var libpthread___pth_init libFunc

libpthread_attr_destroy var #

libc

var libpthread_attr_destroy libFunc

libpthread_attr_getstacksize var #

libc

var libpthread_attr_getstacksize libFunc

libpthread_attr_init var #

libc

var libpthread_attr_init libFunc

libpthread_attr_setdetachstate var #

libc

var libpthread_attr_setdetachstate libFunc

libpthread_attr_setstackaddr var #

libc

var libpthread_attr_setstackaddr libFunc

libpthread_attr_setstacksize var #

libc

var libpthread_attr_setstacksize libFunc

libpthread_create var #

libc

var libpthread_create libFunc

libpthread_kill var #

libc

var libpthread_kill libFunc

libpthread_self var #

libc

var libpthread_self libFunc

libpthread_sigthreadmask var #

libc

var libpthread_sigthreadmask libFunc

limiterEventBits const #

const limiterEventBits = 3

limiterEventIdle const #

const limiterEventIdle

limiterEventIdleMarkWork const #

const limiterEventIdleMarkWork

limiterEventMarkAssist const #

const limiterEventMarkAssist

limiterEventNone const #

const limiterEventNone limiterEventType = iota

limiterEventScavengeAssist const #

const limiterEventScavengeAssist

limiterEventStampNone const #

limiterEventStampNone is the sentinel limiterEventStamp value, indicating no limiter event type and no timestamp.

const limiterEventStampNone = *ast.CallExpr

limiterEventTypeMask const #

limiterEventTypeMask is a mask for the bits in p.limiterEventStart that represent the event type. The rest of the bits of that field represent a timestamp.

const limiterEventTypeMask = *ast.BinaryExpr

loadFactorDen const #

Maximum average load of a bucket that triggers growth is bucketCnt*13/16 (about 80% full) Because of minimum alignment rules, bucketCnt is known to be at least 8. Represent as loadFactorNum/loadFactorDen, to allow integer math.

const loadFactorDen = 2

loadFactorDen const #

const loadFactorDen = 8

loadFactorNum const #

TODO: remove? These are used by tests but not the actual map

const loadFactorNum = 7

loadFactorNum const #

const loadFactorNum = *ast.BinaryExpr

locb const #

The default lowest and highest continuation byte.

const locb = 0x80

lockNames var #

lockNames gives the names associated with each of the above ranks.

var lockNames = []string{...}

lockPartialOrder var #

lockPartialOrder is the transitive closure of the lock rank graph. An entry for rank X lists all of the ranks that can already be held when rank X is acquired. Lock ranks that allow self-cycles list themselves.

var lockPartialOrder [][]lockRank = [][]lockRank{...}

lockRankAllg const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankAllg

lockRankAllocmR const #

SCHED

const lockRankAllocmR

lockRankAllocmRInternal const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankAllocmRInternal

lockRankAllocmW const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankAllocmW

lockRankAllp const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankAllp

lockRankAssistQueue const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankAssistQueue

lockRankCpuprof const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankCpuprof

lockRankDeadlock const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankDeadlock

lockRankDefer const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankDefer

lockRankExecR const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankExecR

lockRankExecRInternal const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankExecRInternal

lockRankExecW const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankExecW

lockRankFin const #

MALLOC

const lockRankFin

lockRankForcegc const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankForcegc

lockRankGcBitsArenas const #

MPROF

const lockRankGcBitsArenas

lockRankGlobalAlloc const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankGlobalAlloc

lockRankGscan const #

STACKGROW

const lockRankGscan

lockRankHchan const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankHchan

lockRankHchanLeaf const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankHchanLeaf

lockRankItab const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankItab

lockRankLeafRank const #

lockRankLeafRank is the rank of lock that does not have a declared rank, and hence is a leaf lock.

const lockRankLeafRank lockRank = 1000

lockRankMheap const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankMheap

lockRankMheapSpecial const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankMheapSpecial

lockRankMspanSpecial const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankMspanSpecial

lockRankNetpollInit const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankNetpollInit

lockRankNotifyList const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankNotifyList

lockRankPanic const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankPanic

lockRankPollCache const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankPollCache

lockRankPollDesc const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankPollDesc

lockRankProfBlock const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankProfBlock

lockRankProfInsert const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankProfInsert

lockRankProfMemActive const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankProfMemActive

lockRankProfMemFuture const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankProfMemFuture

lockRankRaceFini const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankRaceFini

lockRankReflectOffs const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankReflectOffs

lockRankRoot const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankRoot

lockRankScavenge const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankScavenge

lockRankSched const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankSched

lockRankSpanSetSpine const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankSpanSetSpine

lockRankStackLarge const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankStackLarge

lockRankStackpool const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankStackpool

lockRankStrongFromWeakQueue const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankStrongFromWeakQueue

lockRankSudog const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankSudog

lockRankSweep const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankSweep

lockRankSweepWaiters const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankSweepWaiters

lockRankSynctest const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankSynctest

lockRankSysmon const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankSysmon

lockRankTestR const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankTestR

lockRankTestRInternal const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankTestRInternal

lockRankTestW const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankTestW

lockRankTimer const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankTimer

lockRankTimerSend const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankTimerSend

lockRankTimers const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankTimers

lockRankTrace const #

TRACE

const lockRankTrace

lockRankTraceBuf const #

TRACEGLOBAL

const lockRankTraceBuf

lockRankTraceStackTab const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankTraceStackTab

lockRankTraceStrings const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankTraceStrings

lockRankTraceTypeTab const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankTraceTypeTab

lockRankUnknown const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankUnknown lockRank = iota

lockRankUserArenaState const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankUserArenaState

lockRankWakeableSleep const #

Constants representing the ranks of all non-leaf runtime locks, in rank order. Locks with lower rank must be taken before locks with higher rank, in addition to satisfying the partial order in lockPartialOrder. A few ranks allow self-cycles, which are specified in lockPartialOrder.

const lockRankWakeableSleep

lockRankWbufSpans const #

WB

const lockRankWbufSpans

locked const #

const locked uintptr = 1

logHeapArenaBytes const #

logHeapArenaBytes is log_2 of heapArenaBytes. For clarity, prefer using heapArenaBytes where possible (we need the constant to compute some other constants).

const logHeapArenaBytes = *ast.BinaryExpr

logMaxPackedValue const #

const logMaxPackedValue = *ast.BinaryExpr

logPallocChunkBytes const #

const logPallocChunkBytes = *ast.BinaryExpr

logPallocChunkPages const #

const logPallocChunkPages = 9

logScavChunkInUseMax const #

logScavChunkInUseMax is the number of bits needed to represent the number of pages allocated in a single chunk. This is 1 more than log2 of the number of pages in the chunk because we need to represent a fully-allocated chunk.

const logScavChunkInUseMax = *ast.BinaryExpr

logd const #

const logd

logdAddr var #

used in initLogdWrite but defined here to avoid heap allocation.

var logdAddr sockaddr_un

logger var #

var logger loggerType

logicalStackSentinel const #

logicalStackSentinel is a sentinel value at pcBuf[0] signifying that pcBuf[1:] holds a logical stack requiring no further processing. Any other value at pcBuf[0] represents a skip value to apply to the physical stack in pcBuf[1:] after inline expansion.

const logicalStackSentinel = *ast.UnaryExpr

loong64HasLAMCAS var #

var loong64HasLAMCAS bool

loong64HasLAM_BH var #

var loong64HasLAM_BH bool

loong64HasLSX var #

var loong64HasLSX bool

m0 var #

var m0 m

m5 const #

const m5 = 0x1d8e4e27c47d124f

mProfCycle var #

var mProfCycle mProfCycleHolder

mProfCycleWrap const #

const mProfCycleWrap = *ast.BinaryExpr

mSpanDead const #

const mSpanDead mSpanState = iota

mSpanInUse const #

const mSpanInUse

mSpanManual const #

const mSpanManual

mSpanStateNames var #

mSpanStateNames are the names of the span states, indexed by mSpanState.

var mSpanStateNames = []string{...}

madviseUnsupported const #

const madviseUnsupported = 0

mainStarted var #

mainStarted indicates that the main M has started.

var mainStarted bool

main_init_done var #

main_init_done is a signal used by cgocallbackg that initialization has been completed. It is made before _cgo_notify_runtime_init_done, so all cgo calls can rely on it existing. When main_init is complete, it is closed, meaning cgocallbackg can reliably receive from it.

var main_init_done chan bool

mallocHeaderSize const #

A malloc header is functionally a single type pointer, but we need to use 8 here to ensure 8-byte alignment of allocations on 32-bit platforms. It's wasteful, but a lot of code relies on 8-byte alignment for 8-byte atomics.

const mallocHeaderSize = 8

mantbits32 const #

const mantbits32 uint = 23

mantbits64 const #

const mantbits64 uint = 52

maps_errNilAssign var #

go:linkname maps_errNilAssign internal/runtime/maps.errNilAssign

var maps_errNilAssign error = *ast.CallExpr

mask2 const #

const mask2 = 0x1F

mask3 const #

const mask3 = 0x0F

mask4 const #

const mask4 = 0x07

maskUpdatedChan var #

channels for synchronizing signal mask updates with the signal mask thread

var maskUpdatedChan chan struct{...}

maskx const #

const maskx = 0x3F

maxAlign const #

const maxAlign = 8

maxAlloc const #

maxAlloc is the maximum size of an allocation. On 64-bit, it's theoretically possible to allocate 1<<heapAddrBits bytes. On 32-bit, however, this is one less than 1<<32 because the number of bytes in the address space doesn't actually fit in a uintptr.

const maxAlloc = *ast.BinaryExpr

maxArgs const #

maxArgs should be divisible by 2, as Windows stack must be kept 16-byte aligned on syscall entry. Although it only permits maximum 42 parameters, it is arguably large enough.

const maxArgs = 42

maxCPUProfStack const #

const maxCPUProfStack = 64

maxInt64 const #

const maxInt64 = *ast.CallExpr

maxObjsPerSpan const #

const maxObjsPerSpan = 1024

maxObletBytes const #

maxObletBytes is the maximum bytes of an object to scan at once. Larger objects will be split up into "oblets" of at most this size. Since we can scan 1–2 MB/ms, 128 KB bounds scan preemption at ~100 µs. This must be > _MaxSmallSize so that the object base is the span base.

const maxObletBytes = *ast.BinaryExpr

maxOffAddr var #

maxOffAddr is the maximum address in the offset address space. It corresponds to the highest virtual address representable by the page alloc chunk and heap arena maps.

var maxOffAddr = offAddr{...}

maxPackedValue const #

maxPackedValue is the maximum value that any of the three fields in the pallocSum may take on.

const maxPackedValue = *ast.BinaryExpr

maxPagesPerPhysPage const #

maxPagesPerPhysPage is the maximum number of supported runtime pages per physical page, based on maxPhysPageSize.

const maxPagesPerPhysPage = *ast.BinaryExpr

maxPhysHugePageSize const #

maxPhysHugePageSize sets an upper-bound on the maximum huge page size that the runtime supports.

const maxPhysHugePageSize = pallocChunkBytes

maxPhysPageSize const #

maxPhysPageSize is the maximum page size the runtime supports.

const maxPhysPageSize = *ast.BinaryExpr

maxProfStackDepth const #

maxProfStackDepth is the highest valid value for debug.profstackdepth. It's used for the bucket.stk func. TODO(fg): can we get rid of this?

const maxProfStackDepth = 1024

maxRune const #

Numbers fundamental to the encoding.

const maxRune = '\U0010FFFF'

maxSkip const #

maxSkip is to account for deferred inline expansion when using frame pointer unwinding. We record the stack with "physical" frame pointers but handle skipping "logical" frames at some point after collecting the stack. So we need extra space in order to avoid getting fewer than the desired maximum number of frames after expansion. This should be at least as large as the largest skip value used for profiling; otherwise stacks may be truncated inconsistently.

const maxSkip = 6

maxSmallSize const #

const maxSmallSize = _MaxSmallSize

maxStackScanSlack const #

maxStackScanSlack is the bytes of stack space allocated or freed that can accumulate on a P before updating gcController.stackSize.

const maxStackScanSlack = *ast.BinaryExpr

maxTinySize const #

const maxTinySize = _TinySize

maxTraceStringLen const #

const maxTraceStringLen = 1024

maxTriggerRatioNum const #

The maximum trigger constant is chosen somewhat arbitrarily, but the current constant has served us well over the years.

const maxTriggerRatioNum = 61

maxUint64 const #

const maxUint64 = *ast.UnaryExpr

maxWhen const #

maxWhen is the maximum value for timer's when field.

const maxWhen = *ast.BinaryExpr

maxstackceiling var #

var maxstackceiling = maxstacksize

maxstacksize var #

var maxstacksize uintptr = *ast.BinaryExpr

mbuckets var #

var mbuckets atomic.UnsafePointer

mcache0 var #

var mcache0 *mcache

memDebug const #

const memDebug = false

memFreelist var #

var memFreelist memHdrPtr

memProfile const #

profile types

const memProfile bucketType = *ast.BinaryExpr

memlock var #

var memlock mutex

memmoveBits var #

var memmoveBits uint8

memoryError var #

var memoryError = *ast.CallExpr

memoryLimitHeapGoalHeadroomPercent const #

memoryLimitHeapGoalHeadroomPercent is how much headroom the memory-limit-based heap goal should have as a percent of the maximum possible heap goal allowed to maintain the memory limit.

const memoryLimitHeapGoalHeadroomPercent = 3

memoryLimitMinHeapGoalHeadroom const #

memoryLimitMinHeapGoalHeadroom is the minimum amount of headroom the pacer gives to the heap goal when operating in the memory-limited regime. That is, it'll reduce the heap goal by this many extra bytes off of the base calculation, at minimum.

const memoryLimitMinHeapGoalHeadroom = *ast.BinaryExpr

memstats var #

var memstats mstats

methodValueCallFrameObjs var #

var methodValueCallFrameObjs [1]stackObjectRecord

metricKindBad const #

These values must be kept identical to their corresponding Kind* values in the runtime/metrics package.

const metricKindBad metricKind = iota

metricKindFloat64 const #

const metricKindFloat64

metricKindFloat64Histogram const #

const metricKindFloat64Histogram

metricKindUint64 const #

const metricKindUint64

metrics var #

var metrics map[string]metricData

metricsInit var #

var metricsInit bool

metricsSema var #

metrics is a map of runtime/metrics keys to data used by the runtime to sample each metric's value. metricsInit indicates it has been initialized. These fields are protected by metricsSema which should be locked/unlocked with metricsLock() / metricsUnlock().

var metricsSema uint32 = 1

mheap_ var #

var mheap_ mheap

minHeapAlign const #

const minHeapAlign = 8

minHeapForMetadataHugePages const #

minHeapForMetadataHugePages sets a threshold on when certain kinds of heap metadata, currently the arenas map L2 entries and page alloc bitmap mappings, are allowed to be backed by huge pages. If the heap goal ever exceeds this threshold, then huge pages are enabled. These numbers are chosen with the assumption that huge pages are on the order of a few MiB in size. The kind of metadata this applies to has a very low overhead when compared to address space used, but their constant overheads for small heaps would be very high if they were to be backed by huge pages (e.g. a few MiB makes a huge difference for an 8 MiB heap, but barely any difference for a 1 GiB heap). The benefit of huge pages is also not worth it for small heaps, because only a very, very small part of the metadata is used for small heaps. N.B. If the heap goal exceeds the threshold then shrinks to a very small size again, then huge pages will still be enabled for this mapping. The reason is that there's no point unless we're also returning the physical memory for these metadata mappings back to the OS. That would be quite complex to do in general as the heap is likely fragmented after a reduction in heap size.

const minHeapForMetadataHugePages = *ast.BinaryExpr

minLegalPointer const #

minLegalPointer is the smallest possible legal pointer. This is the smallest possible architectural page size, since we assume that the first page is never mapped. This should agree with minZeroPage in the compiler.

const minLegalPointer uintptr = 4096

minOffAddr var #

minOffAddr is the minimum address in the offset space, and it corresponds to the virtual address arenaBaseOffset.

var minOffAddr = offAddr{...}

minPhysPageSize const #

minPhysPageSize is a lower-bound on the physical page size. The true physical page size may be larger than this. In contrast, sys.PhysPageSize is an upper-bound on the physical page size.

const minPhysPageSize = 4096

minScavWorkTime const #

Spend at least 1 ms scavenging, otherwise the corresponding sleep time to maintain our desired utilization is too low to be reliable.

const minScavWorkTime = 1e6

minSizeForMallocHeader const #

The minimum object size that has a malloc header, exclusive. The size of this value controls overheads from the malloc header. The minimum size is bound by writeHeapBitsSmall, which assumes that the pointer bitmap for objects of a size smaller than this doesn't cross more than one pointer-word boundary. This sets an upper-bound on this value at the number of bits in a uintptr, multiplied by the pointer size in bytes. We choose a value here that has a natural cutover point in terms of memory overheads. This value just happens to be the maximum possible value this can be. A span with heap bits in it will have 128 bytes of heap bits on 64-bit platforms, and 256 bytes of heap bits on 32-bit platforms. The first size class where malloc headers match this overhead for 64-bit platforms is 512 bytes (8 KiB / 512 bytes * 8 bytes-per-header = 128 bytes of overhead). On 32-bit platforms, this same point is the 256 byte size class (8 KiB / 256 bytes * 8 bytes-per-header = 256 bytes of overhead). Guaranteed to be exactly at a size class boundary. The reason this value is an exclusive minimum is subtle. Suppose we're allocating a 504-byte object and its rounded up to 512 bytes for the size class. If minSizeForMallocHeader is 512 and an inclusive minimum, then a comparison against minSizeForMallocHeader by the two values would produce different results. In other words, the comparison would not be invariant to size-class rounding. Eschewing this property means a more complex check or possibly storing additional state to determine whether a span has malloc headers.

const minSizeForMallocHeader = *ast.BinaryExpr

minTagBits const #

minTagBits is the minimum number of tag bits that we expect.

const minTagBits = 10

minTimeForTicksPerSecond const #

minTimeForTicksPerSecond is the minimum elapsed time we require to consider our ticksPerSecond measurement to be of decent enough quality for profiling. There's a linear relationship here between minimum time and error from the true value. The error from the true ticks-per-second in a linux/amd64 VM seems to be: - 1 ms -> ~0.02% error - 5 ms -> ~0.004% error - 10 ms -> ~0.002% error - 50 ms -> ~0.0003% error - 100 ms -> ~0.0001% error We're willing to take 0.004% error here, because ticksPerSecond is intended to be used for converting durations, not timestamps. Durations are usually going to be much larger, and so the tiny error doesn't matter. The error is definitely going to be a problem when trying to use this for timestamps, as it'll make those timestamps much less likely to line up.

const minTimeForTicksPerSecond = *ast.BinaryExpr

minTopHash const #

const minTopHash = 5

minTriggerRatioNum const #

The minimum trigger constant was chosen empirically: given a sufficiently fast/scalable allocator with 48 Ps that could drive the trigger ratio to <0.05, this constant causes applications to retain the same peak RSS compared to not having this allocator.

const minTriggerRatioNum = 45

minhexdigits var #

var minhexdigits = 0

modinfo var #

set using cmd/go/internal/modload.ModInfoProg

var modinfo string

modulesSlice var #

var modulesSlice *[]*moduledata

msanenabled const #

const msanenabled = false

msanenabled const #

Private interface for the runtime.

const msanenabled = true

mtx var #

var mtx mutex

mtxpoll var #

var mtxpoll mutex

mtxset var #

var mtxset mutex

mutexActiveSpinCount const #

const mutexActiveSpinCount = 4

mutexActiveSpinSize const #

const mutexActiveSpinSize = 30

mutexLocked const #

const mutexLocked = 0x001

mutexMMask const #

const mutexMMask = 0x3FF

mutexMOffset const #

const mutexMOffset = mallocHeaderSize

mutexPassiveSpinCount const #

const mutexPassiveSpinCount = 1

mutexProfile const #

const mutexProfile

mutexSleeping const #

const mutexSleeping = 0x002

mutexSpinning const #

const mutexSpinning = 0x100

mutexStackLocked const #

const mutexStackLocked = 0x200

mutexTailWakePeriod const #

const mutexTailWakePeriod = 16

mutex_locked const #

const mutex_locked = 1

mutex_locked const #

const mutex_locked = 1

mutex_locked const #

const mutex_locked = 1

mutex_sleeping const #

const mutex_sleeping = 2

mutex_unlocked const #

const mutex_unlocked = 0

mutex_unlocked const #

const mutex_unlocked = 0

mutex_unlocked const #

const mutex_unlocked = 0

mutexprofilerate var #

var mutexprofilerate uint64

nameOffset const #

offset of the name field in a 9P directory entry - see syscall.UnmarshalDir()

const nameOffset = 39

nan32 const #

const nan32 uint32 = *ast.BinaryExpr

nan64 const #

const nan64 uint64 = *ast.BinaryExpr

nbuf var #

var nbuf uintptr

ncgocall var #

var ncgocall uint64

ncpu var #

var ncpu int32

needSysmonWorkaround var #

needSysmonWorkaround is true if the workaround for golang.org/issue/42515 is needed on NetBSD.

var needSysmonWorkaround bool = false

neg32 const #

const neg32 uint32 = *ast.BinaryExpr

neg64 const #

const neg64 uint64 = *ast.BinaryExpr

netpollBreakRd var #

var netpollBreakRd uintptr

netpollBreakWr var #

var netpollBreakWr uintptr

netpollBroken var #

var netpollBroken bool

netpollBrokenLock var #

netpollBroken, protected by netpollBrokenLock, avoids a double notewakeup.

var netpollBrokenLock mutex

netpollEventFd var #

var netpollEventFd uintptr

netpollInitLock var #

var netpollInitLock mutex

netpollInited var #

var netpollInited atomic.Uint32

netpollInited var #

var netpollInited atomic.Uint32

netpollNote var #

var netpollNote note

netpollSourceBreak const #

Sources are used to identify the event that created an overlapped entry. The source values are arbitrary. There is no risk of collision with user defined values because the only way to set the key of an overlapped entry is using the iocphandle, which is not accessible to user code.

const netpollSourceBreak

netpollSourceReady const #

Sources are used to identify the event that created an overlapped entry. The source values are arbitrary. There is no risk of collision with user defined values because the only way to set the key of an overlapped entry is using the iocphandle, which is not accessible to user code.

const netpollSourceReady = *ast.BinaryExpr

netpollSourceTimer const #

Sources are used to identify the event that created an overlapped entry. The source values are arbitrary. There is no risk of collision with user defined values because the only way to set the key of an overlapped entry is using the iocphandle, which is not accessible to user code.

const netpollSourceTimer

netpollStubLock var #

var netpollStubLock mutex

netpollWaiters var #

var netpollWaiters atomic.Uint32

netpollWakeSig var #

var netpollWakeSig atomic.Uint32

netpollWakeSig var #

var netpollWakeSig atomic.Uint32

netpollWakeSig var #

var netpollWakeSig atomic.Uint32

netpollWakeSig var #

var netpollWakeSig atomic.Uint32

netpollWakeSig var #

var netpollWakeSig atomic.Uint32

newmHandoff var #

newmHandoff contains a list of m structures that need new OS threads. This is used by newm in situations where newm itself can't safely start an OS thread.

var newmHandoff struct{...}

newprocs var #

var newprocs int32

noCheck const #

sentinel bucket ID for iterator checks

const noCheck = *ast.BinaryExpr

note_cleared const #

const note_cleared = 0

note_timeout const #

const note_timeout = 2

note_woken const #

const note_woken = 1

notefile var #

var notefile = *ast.CallExpr

ntdlldll var #

var ntdlldll = [...]uint16{...}

numSpanClasses const #

const numSpanClasses = *ast.BinaryExpr

numStatsDeps const #

const numStatsDeps

numSweepClasses const #

const numSweepClasses = *ast.BinaryExpr

offsetARMHasIDIVA const #

Offsets into internal/cpu records for use in assembly.

const offsetARMHasIDIVA = *ast.CallExpr

offsetLOONG64HasLSX const #

Offsets into internal/cpu records for use in assembly.

const offsetLOONG64HasLSX = *ast.CallExpr

offsetMIPS64XHasMSA const #

Offsets into internal/cpu records for use in assembly.

const offsetMIPS64XHasMSA = *ast.CallExpr

offsetX86HasAVX const #

Offsets into internal/cpu records for use in assembly.

const offsetX86HasAVX = *ast.CallExpr

offsetX86HasAVX2 const #

Offsets into internal/cpu records for use in assembly.

const offsetX86HasAVX2 = *ast.CallExpr

offsetX86HasERMS const #

Offsets into internal/cpu records for use in assembly.

const offsetX86HasERMS = *ast.CallExpr

offsetX86HasRDTSCP const #

Offsets into internal/cpu records for use in assembly.

const offsetX86HasRDTSCP = *ast.CallExpr

oldIterator const #

const oldIterator = 2

oneptrmask var #

ptrmask for an allocation containing a single pointer.

var oneptrmask = [...]uint8{...}

osHasLowResClock const #

osHasLowResClock indicates that timestamps produced by nanotime on the platform have a low resolution, typically on the order of 1 ms or more.

const osHasLowResClock = *ast.BinaryExpr

osHasLowResClockInt const #

osHasLowResClockInt is osHasLowResClock but in integer form, so it can be used to create constants conditionally.

const osHasLowResClockInt = goos.IsWindows

osHasLowResTimer const #

osHasLowResTimer indicates that the platform's internal timer system has a low resolution, typically on the order of 1 ms or more.

const osHasLowResTimer = *ast.BinaryExpr

osRelaxMinNS const #

osRelaxMinNS indicates that sysmon shouldn't osRelax if the next timer is less than 60 ms from now. Since osRelaxing may reduce timer resolution to 15.6 ms, this keeps timer error under roughly 1 part in 4.

const osRelaxMinNS = *ast.BinaryExpr

osRelaxMinNS const #

osRelaxMinNS is the number of nanoseconds of idleness to tolerate without performing an osRelax. Since osRelax may reduce the precision of timers, this should be enough larger than the relaxed timer precision to keep the timer error acceptable.

const osRelaxMinNS = 0

overflowError var #

var overflowError = *ast.CallExpr

overflowTag var #

var overflowTag [1]unsafe.Pointer

overrideWrite var #

overrideWrite allows write to be redirected externally, by linkname'ing this and set it to a write function. overrideWrite should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - golang.zx2c4.com/wireguard/windows Do not remove or change the type signature. See go.dev/issue/67401. go:linkname overrideWrite

var overrideWrite func(fd uintptr, p unsafe.Pointer, n int32) int32

pageAlloc32Bit const #

Constants for testing.

const pageAlloc32Bit = 1

pageAlloc32Bit const #

Constants for testing.

const pageAlloc32Bit = 0

pageAlloc64Bit const #

const pageAlloc64Bit = 1

pageAlloc64Bit const #

const pageAlloc64Bit = 0

pageCachePages const #

const pageCachePages = *ast.BinaryExpr

pageShift const #

const pageShift = _PageShift

pageSize const #

const pageSize = _PageSize

pagesPerArena const #

const pagesPerArena = *ast.BinaryExpr

pagesPerReclaimerChunk const #

pagesPerReclaimerChunk indicates how many pages to scan from the pageInUse bitmap at a time. Used by the page reclaimer. Higher values reduce contention on scanning indexes (such as h.reclaimIndex), but increase the minimum latency of the operation. The time required to scan this many pages can vary a lot depending on how many spans are actually freed. Experimentally, it can scan for pages at ~300 GB/ms on a 2.6GHz Core i7, but can only free spans at ~32 MB/ms. Using 512 pages bounds this at roughly 100µs. Must be a multiple of the pageInUse bitmap element size and must also evenly divide pagesPerArena.

const pagesPerReclaimerChunk = 512

pagesPerSpanRoot const #

pagesPerSpanRoot indicates how many pages to scan from a span root at a time. Used by special root marking. Higher values improve throughput by increasing locality, but increase the minimum latency of a marking operation. Must be a multiple of the pageInUse bitmap element size and must also evenly divide pagesPerArena.

const pagesPerSpanRoot = 512

pagesize var #

var pagesize = *ast.CallExpr

pallocChunkBytes const #

const pallocChunkBytes = *ast.BinaryExpr

pallocChunkPages const #

The size of a bitmap chunk, i.e. the amount of bits (that is, pages) to consider in the bitmap at once.

const pallocChunkPages = *ast.BinaryExpr

pallocChunksL1Bits const #

Number of bits needed to represent all indices into the L1 of the chunks map. See (*pageAlloc).chunks for more details. Update the documentation there should this number change.

const pallocChunksL1Bits = 0

pallocChunksL1Bits const #

Number of bits needed to represent all indices into the L1 of the chunks map. See (*pageAlloc).chunks for more details. Update the documentation there should this number change.

const pallocChunksL1Bits = 13

pallocChunksL1Shift const #

const pallocChunksL1Shift = pallocChunksL2Bits

pallocChunksL2Bits const #

pallocChunksL2Bits is the number of bits of the chunk index number covered by the second level of the chunks map. See (*pageAlloc).chunks for more details. Update the documentation there should this change.

const pallocChunksL2Bits = *ast.BinaryExpr

pallocSumBytes const #

const pallocSumBytes = *ast.CallExpr

panicking var #

panicking is non-zero when crashing the program for an unrecovered panic.

var panicking atomic.Uint32

paniclk var #

paniclk is held while printing the panic information and stack trace, so that two concurrent panics don't overlap their output.

var paniclk mutex

panicnil var #

var panicnil = *ast.UnaryExpr

passive_spin const #

This implementation depends on OS-specific implementations of func semacreate(mp *m) Create a semaphore for mp, if it does not already have one. func semasleep(ns int64) int32 If ns < 0, acquire m's semaphore and return 0. If ns >= 0, try to acquire m's semaphore for at most ns nanoseconds. Return 0 if the semaphore was acquired, -1 if interrupted or timed out. func semawakeup(mp *m) Wake up mp, which is or will soon be sleeping on its semaphore.

const passive_spin = 1

passive_spin const #

const passive_spin = 1

passive_spin const #

const passive_spin = 1

pcTables var #

var pcTables []byte

pdEface var #

var pdEface any = *ast.CallExpr

pdNil const #

pollDesc contains 2 binary semaphores, rg and wg, to park reader and writer goroutines respectively. The semaphore can be in the following states: pdReady - io readiness notification is pending; a goroutine consumes the notification by changing the state to pdNil. pdWait - a goroutine prepares to park on the semaphore, but not yet parked; the goroutine commits to park by changing the state to G pointer, or, alternatively, concurrent io notification changes the state to pdReady, or, alternatively, concurrent timeout/close changes the state to pdNil. G pointer - the goroutine is blocked on the semaphore; io notification or timeout/close changes the state to pdReady or pdNil respectively and unparks the goroutine. pdNil - none of the above.

const pdNil uintptr = 0

pdReady const #

pollDesc contains 2 binary semaphores, rg and wg, to park reader and writer goroutines respectively. The semaphore can be in the following states: pdReady - io readiness notification is pending; a goroutine consumes the notification by changing the state to pdNil. pdWait - a goroutine prepares to park on the semaphore, but not yet parked; the goroutine commits to park by changing the state to G pointer, or, alternatively, concurrent io notification changes the state to pdReady, or, alternatively, concurrent timeout/close changes the state to pdNil. G pointer - the goroutine is blocked on the semaphore; io notification or timeout/close changes the state to pdReady or pdNil respectively and unparks the goroutine. pdNil - none of the above.

const pdReady uintptr = 1

pdType var #

var pdType *_type = *ast.CallExpr._type

pdWait const #

pollDesc contains 2 binary semaphores, rg and wg, to park reader and writer goroutines respectively. The semaphore can be in the following states: pdReady - io readiness notification is pending; a goroutine consumes the notification by changing the state to pdNil. pdWait - a goroutine prepares to park on the semaphore, but not yet parked; the goroutine commits to park by changing the state to G pointer, or, alternatively, concurrent io notification changes the state to pdReady, or, alternatively, concurrent timeout/close changes the state to pdNil. G pointer - the goroutine is blocked on the semaphore; io notification or timeout/close changes the state to pdReady or pdNil respectively and unparks the goroutine. pdNil - none of the above.

const pdWait uintptr = 2

pds var #

var pds []*pollDesc

pds var #

var pds []*pollDesc

pendingPreemptSignals var #

pendingPreemptSignals is the number of preemption signals that have been sent but not received. This is only used on Darwin. For #41702.

var pendingPreemptSignals atomic.Int32

pendingUpdates var #

var pendingUpdates int32

perThreadSyscall var #

perThreadSyscall is the system call to execute for the ongoing doAllThreadsSyscall. perThreadSyscall may only be written while mp.needPerThreadSyscall == 0 on all Ms.

var perThreadSyscall perThreadSyscallArgs

persistentChunkSize const #

persistentChunkSize is the number of bytes we allocate when we grow a persistentAlloc.

const persistentChunkSize = *ast.BinaryExpr

persistentChunks var #

persistentChunks is a list of all the persistent chunks we have allocated. The list is maintained through the first word in the persistent chunk. This is updated atomically.

var persistentChunks *notInHeap

pfds var #

var pfds []pollfd

physHugePageShift var #

physHugePageSize is the size in bytes of the OS's default physical huge page size whose allocation is opaque to the application. It is assumed and verified to be a power of two. If set, this must be set by the OS init code (typically in osinit) before mallocinit. However, setting it at all is optional, and leaving the default value is always safe (though potentially less efficient). Since physHugePageSize is always assumed to be a power of two, physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift. The purpose of physHugePageShift is to avoid doing divisions in performance critical functions.

var physHugePageShift uint

physHugePageSize var #

physHugePageSize is the size in bytes of the OS's default physical huge page size whose allocation is opaque to the application. It is assumed and verified to be a power of two. If set, this must be set by the OS init code (typically in osinit) before mallocinit. However, setting it at all is optional, and leaving the default value is always safe (though potentially less efficient). Since physHugePageSize is always assumed to be a power of two, physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift. The purpose of physHugePageShift is to avoid doing divisions in performance critical functions.

var physHugePageSize uintptr

physPageAlignedStacks const #

physPageAlignedStacks indicates whether stack allocations must be physical page aligned. This is a requirement for MAP_STACK on OpenBSD.

const physPageAlignedStacks = *ast.BinaryExpr

physPageSize var #

physPageSize is the size in bytes of the OS's physical pages. Mapping and unmapping operations must be done at multiples of physPageSize. This must be set by the OS init code (typically in osinit) before mallocinit.

var physPageSize uintptr

pid var #

var pid = *ast.CallExpr

pinnedTypemaps var #

pinnedTypemaps are the map[typeOff]*_type from the moduledata objects. These typemap objects are allocated at run time on the heap, but the only direct reference to them is in the moduledata, created by the linker and marked SNOPTRDATA so it is ignored by the GC. To make sure the map isn't collected, we keep a second reference here.

var pinnedTypemaps []map[typeOff]*_type

pinnerLeakPanic var #

to be able to test that the GC panics when a pinned pointer is leaking, this panic function is a variable, that can be overwritten by a test.

var pinnerLeakPanic = *ast.FuncLit

pinnerRefStoreSize const #

const pinnerRefStoreSize = *ast.BinaryExpr

pinnerSize const #

const pinnerSize = 64

pollBlockSize const #

const pollBlockSize = *ast.BinaryExpr

pollClosing const #

const pollClosing = *ast.BinaryExpr

pollErrClosing const #

Error codes returned by runtime_pollReset and runtime_pollWait. These must match the values in internal/poll/fd_poll_runtime.go.

const pollErrClosing = 1

pollErrNotPollable const #

Error codes returned by runtime_pollReset and runtime_pollWait. These must match the values in internal/poll/fd_poll_runtime.go.

const pollErrNotPollable = 3

pollErrTimeout const #

Error codes returned by runtime_pollReset and runtime_pollWait. These must match the values in internal/poll/fd_poll_runtime.go.

const pollErrTimeout = 2

pollEventErr const #

const pollEventErr

pollExpiredReadDeadline const #

const pollExpiredReadDeadline

pollExpiredWriteDeadline const #

const pollExpiredWriteDeadline

pollFDSeq const #

const pollFDSeq

pollFDSeqBits const #

const pollFDSeqBits = 20

pollFDSeqMask const #

const pollFDSeqMask = *ast.BinaryExpr

pollNoError const #

Error codes returned by runtime_pollReset and runtime_pollWait. These must match the values in internal/poll/fd_poll_runtime.go.

const pollNoError = 0

pollcache var #

var pollcache pollCache

poolcleanup var #

var poolcleanup func()

portfd var #

var portfd int32 = *ast.UnaryExpr

powrprofdll var #

var powrprofdll = [...]uint16{...}

preemptMSupported const #

const preemptMSupported = true

preemptMSupported const #

const preemptMSupported = false

preemptMSupported const #

const preemptMSupported = false

preemptMSupported const #

const preemptMSupported = true

printBacklog var #

printBacklog is a circular buffer of messages written with the builtin print* functions, for use in postmortem analysis of core dumps.

var printBacklog [512]byte

printBacklogIndex var #

var printBacklogIndex int

procAuxv var #

var procAuxv = *ast.CallExpr

procdir var #

var procdir = *ast.CallExpr

processorVersionInfo var #

Information about what cpu features are available. Packages outside the runtime should not use these as they are not an external api. Set on startup in asm_{386,amd64}.s

var processorVersionInfo uint32

prof var #

var prof struct{...}

profBlockLock var #

profBlockLock protects the contents of every blockRecord struct

var profBlockLock mutex

profBufBlocking const #

const profBufBlocking profBufReadMode = iota

profBufNonBlocking const #

const profBufNonBlocking

profBufTagCount const #

profBufTagCount is the size of the CPU profile buffer's storage for the goroutine tags associated with each sample. A capacity of 1<<14 means room for 16k samples, or 160 thread-seconds at a 100 Hz sample rate.

const profBufTagCount = *ast.BinaryExpr

profBufWordCount const #

profBufWordCount is the size of the CPU profile buffer's storage for the header and stack of each sample, measured in 64-bit words. Every sample has a required header of two words. With a small additional header (a word or two) and stacks at the profiler's maximum length of 64 frames, that capacity can support 1900 samples or 19 thread-seconds at a 100 Hz sample rate, at a cost of 1 MiB.

const profBufWordCount = *ast.BinaryExpr

profInsertLock var #

profInsertLock protects changes to the start of all *bucket linked lists

var profInsertLock mutex

profMemActiveLock var #

profMemActiveLock protects the active field of every memRecord struct

var profMemActiveLock mutex

profMemFutureLock var #

profMemFutureLock is a set of locks that protect the respective elements of the future array of every memRecord struct

var profMemFutureLock [*ast.CallExpr]mutex

profReaderSleeping const #

const profReaderSleeping profIndex = *ast.BinaryExpr

profWriteExtra const #

const profWriteExtra profIndex = *ast.BinaryExpr

profiletimer var #

var profiletimer uintptr

ptrBits const #

const ptrBits = *ast.BinaryExpr

ptrnames var #

var ptrnames = []string{...}

qq var #

var qq = [...]byte{...}

qsize const #

const qsize = 64

raceFiniLock var #

var raceFiniLock mutex

raceGetProcCmd const #

const raceGetProcCmd = iota

raceSymbolizeCodeCmd const #

const raceSymbolizeCodeCmd

raceSymbolizeDataCmd const #

const raceSymbolizeDataCmd

racearenaend var #

var racearenaend uintptr

racearenastart var #

start/end of heap for race_amd64.s

var racearenastart uintptr

racecgosync var #

var racecgosync uint64

racedataend var #

var racedataend uintptr

racedatastart var #

start/end of global data (data+bss).

var racedatastart uintptr

raceenabled const #

const raceenabled = false

raceenabled const #

const raceenabled = true

raceprocctx0 var #

var raceprocctx0 uintptr

randomizeScheduler const #

To shake out latent assumptions about scheduling order, we introduce some randomness into scheduling decisions when running with the race detector. The need for this was made obvious by changing the (deterministic) scheduling order in Go 1.5 and breaking many poorly-written tests. With the randomness here, as long as the tests pass consistently with -race, they shouldn't have latent scheduling assumptions.

const randomizeScheduler = raceenabled

rangeDoneError var #

var rangeDoneError = *ast.CallExpr

rangeExhaustedError var #

var rangeExhaustedError = *ast.CallExpr

rangeMissingPanicError var #

var rangeMissingPanicError = *ast.CallExpr

rangePanicError var #

var rangePanicError = *ast.CallExpr

rdwake var #

var rdwake int32

readRandomFailed var #

var readRandomFailed bool

reduceExtraPercent const #

reduceExtraPercent represents the amount of memory under the limit that the scavenger should target. For example, 5 means we target 95% of the limit. The purpose of shooting lower than the limit is to ensure that, once close to the limit, the scavenger is working hard to maintain it. If we have a memory limit set but are far away from it, there's no harm in leaving up to 100-retainExtraPercent live, and it's more efficient anyway, for the same reasons that retainExtraPercent exists.

const reduceExtraPercent = 5

reflectOffs var #

reflectOffs holds type offsets defined at run time by the reflect package. When a type is defined at run time, its *rtype data lives on the heap. There are a wide range of possible addresses the heap may use, that may not be representable as a 32-bit offset. Moreover the GC may one day start moving heap memory, in which case there is no stable offset that can be defined. To provide stable offsets, we add pin *rtype objects in a global map and treat the offset as an identifier. We use negative offsets that do not overlap with any compile-time module offsets. Entries are created by reflect.addReflectOff.

var reflectOffs struct{...}

repmovsPreferred const #

repmovsPreferred indicates that REP MOVSx instruction is more efficient on the CPU.

const repmovsPreferred = *ast.BinaryExpr

retSledSize const #

Keep in sync with the definition of ret_sled in src/runtime/libfuzzer_amd64.s

const retSledSize = 512

retainExtraPercent const #

retainExtraPercent represents the amount of memory over the heap goal that the scavenger should keep as a buffer space for the allocator. This constant is used when we do not have a memory limit set. The purpose of maintaining this overhead is to have a greater pool of unscavenged memory available for allocation (since using scavenged memory incurs an additional cost), to account for heap fragmentation and the ever-changing layout of the heap.

const retainExtraPercent = 10

riscv64AddrBits const #

riscv64 SV57 mode gives 56 bits of userspace VA. tagged pointer code supports it, but broader support for SV57 mode is incomplete, and there may be other issues (see #54104).

const riscv64AddrBits = 56

riscv64TagBits const #

const riscv64TagBits = *ast.BinaryExpr

rootBlockBytes const #

rootBlockBytes is the number of bytes to scan per data or BSS root.

const rootBlockBytes = *ast.BinaryExpr

rune1Max const #

const rune1Max = *ast.BinaryExpr

rune2Max const #

const rune2Max = *ast.BinaryExpr

rune3Max const #

const rune3Max = *ast.BinaryExpr

runeError const #

Numbers fundamental to the encoding.

const runeError = '\uFFFD'

runeSelf const #

Numbers fundamental to the encoding.

const runeSelf = 0x80

runningPanicDefers var #

runningPanicDefers is non-zero while running deferred functions for panic. This is used to try hard to get a panic stack trace out when exiting.

var runningPanicDefers atomic.Uint32

runtimeInitTime var #

runtimeInitTime is the nanotime() at which the runtime started.

var runtimeInitTime int64

runtime_inittasks var #

This slice records the initializing tasks that need to be done to start up the runtime. It is built by the linker.

var runtime_inittasks []*initTask

rwmutexMaxReaders const #

const rwmutexMaxReaders = *ast.BinaryExpr

sameSizeGrow const #

const sameSizeGrow = 8

scavChunkFlagsMask const #

const scavChunkFlagsMask = *ast.BinaryExpr

scavChunkHasFree const #

scavChunkHasFree indicates whether the chunk has anything left to scavenge. This is the opposite of "empty," used elsewhere in this file. The reason we say "HasFree" here is so the zero value is correct for a newly-grown chunk. (New memory is scavenged.)

const scavChunkHasFree scavChunkFlags = *ast.BinaryExpr

scavChunkHiOccFrac const #

scavChunkHiOccFrac indicates the fraction of pages that need to be allocated in the chunk in a single GC cycle for it to be considered high density.

const scavChunkHiOccFrac = 0.96875

scavChunkHiOccPages const #

const scavChunkHiOccPages = *ast.CallExpr

scavChunkInUseMask const #

const scavChunkInUseMask = *ast.BinaryExpr

scavChunkMaxFlags const #

scavChunkMaxFlags is the maximum number of flags we can have, given how a scavChunkData is packed into 8 bytes.

const scavChunkMaxFlags = 6

scavenge var #

var scavenge struct{...}

scavengeCostRatio const #

scavengeCostRatio is the approximate ratio between the costs of using previously scavenged memory and scavenging memory. For most systems the cost of scavenging greatly outweighs the costs associated with using scavenged memory, making this constant 0. On other systems (especially ones where "sysUsed" is not just a no-op) this cost is non-trivial. This ratio is used as part of multiplicative factor to help the scavenger account for the additional costs of using scavenged memory in its pacing.

const scavengeCostRatio = *ast.BinaryExpr

scavengeIndexArray var #

scavengeIndexArray is the backing store for p.scav.index.chunks. On 32-bit platforms, it's small enough to just be a global.

var scavengeIndexArray [*ast.BinaryExpr]atomicScavChunkData

scavengePercent const #

The background scavenger is paced according to these parameters. scavengePercent represents the portion of mutator time we're willing to spend on scavenging in percent.

const scavengePercent = 1

scavenger var #

Sleep/wait state of the background scavenger.

var scavenger scavengerState

sched var #

var sched schedt

secureMode var #

secureMode is only ever mutated in schedinit, so we don't need to worry about synchronization primitives.

var secureMode bool

secureMode var #

secureMode holds the value of AT_SECURE passed in the auxiliary vector.

var secureMode bool

secureMode var #

secureMode is only ever mutated in schedinit, so we don't need to worry about synchronization primitives.

var secureMode bool

selectDefault const #

const selectDefault

selectRecv const #

const selectRecv

selectSend const #

const selectSend

semTabSize const #

Prime to not correlate with any user patterns.

const semTabSize = 251

semaBlockProfile const #

const semaBlockProfile semaProfileFlags = *ast.BinaryExpr

semaMutexProfile const #

const semaMutexProfile

semtable var #

var semtable semTable

set_crosscall2 var #

set_crosscall2 is set by the runtime/cgo package set_crosscall2 should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/ebitengine/purego Do not remove or change the type signature. See go.dev/issue/67401. go:linkname set_crosscall2

var set_crosscall2 func()

shiftError var #

var shiftError = *ast.CallExpr

sig var #

sig handles communication between the signal handler and os/signal. Other than the inuse and recv fields, the fields are accessed atomically. The wanted and ignored fields are only written by one goroutine at a time; access is controlled by the handlers Mutex in os/signal. The fields are only read by that one goroutine and by the signal handler. We access them atomically to minimize the race between setting them in the goroutine calling os/signal and the signal handler, which may be running in a different thread. That race is unavoidable, as there is no connection between handling a signal and receiving one, but atomic instructions should minimize it.

var sig struct{...}

sig var #

var sig struct{...}

sigIdle const #

const sigIdle = iota

sigNoteRead var #

The read and write file descriptors used by the sigNote functions.

var sigNoteRead int32

sigNoteWrite var #

The read and write file descriptors used by the sigNote functions.

var sigNoteWrite int32

sigPerThreadSyscall const #

sigPerThreadSyscall is the same signal (SIGSETXID) used by glibc for per-thread syscalls on Linux. We use it for the same purpose in non-cgo binaries.

const sigPerThreadSyscall = *ast.BinaryExpr

sigPerThreadSyscall const #

sigPerThreadSyscall is only used on linux, so we assign a bogus signal number.

const sigPerThreadSyscall = *ast.BinaryExpr

sigPerThreadSyscall const #

sigPerThreadSyscall is only used on linux, so we assign a bogus signal number.

const sigPerThreadSyscall = *ast.BinaryExpr

sigPerThreadSyscall const #

sigPerThreadSyscall is only used on linux, so we assign a bogus signal number.

const sigPerThreadSyscall = *ast.BinaryExpr

sigPerThreadSyscall const #

sigPerThreadSyscall is only used on linux, so we assign a bogus signal number.

const sigPerThreadSyscall = *ast.BinaryExpr

sigPerThreadSyscall const #

sigPerThreadSyscall is only used on linux, so we assign a bogus signal number.

const sigPerThreadSyscall = *ast.BinaryExpr

sigPerThreadSyscall const #

sigPerThreadSyscall is only used on linux, so we assign a bogus signal number.

const sigPerThreadSyscall = *ast.BinaryExpr

sigPerThreadSyscall const #

sigPerThreadSyscall is only used on linux, so we assign a bogus signal number.

const sigPerThreadSyscall = *ast.BinaryExpr

sigPreempt const #

sigPreempt is the signal used for non-cooperative preemption. There's no good way to choose this signal, but there are some heuristics: 1. It should be a signal that's passed-through by debuggers by default. On Linux, this is SIGALRM, SIGURG, SIGCHLD, SIGIO, SIGVTALRM, SIGPROF, and SIGWINCH, plus some glibc-internal signals. 2. It shouldn't be used internally by libc in mixed Go/C binaries because libc may assume it's the only thing that can handle these signals. For example SIGCANCEL or SIGSETXID. 3. It should be a signal that can happen spuriously without consequences. For example, SIGALRM is a bad choice because the signal handler can't tell if it was caused by the real process alarm or not (arguably this means the signal is broken, but I digress). SIGUSR1 and SIGUSR2 are also bad because those are often used in meaningful ways by applications. 4. We need to deal with platforms without real-time signals (like macOS), so those are out. We use SIGURG because it meets all of these criteria, is extremely unlikely to be used by an application for its "real" meaning (both because out-of-band data is basically unused and because SIGURG doesn't report which socket has the condition, making it pretty useless), and even if it is, the application has to be ready for spurious SIGURG. SIGIO wouldn't be a bad choice either, but is more likely to be used for real.

const sigPreempt = _SIGURG

sigReceiving const #

const sigReceiving

sigSending const #

const sigSending

sign32 const #

const sign32 = *ast.BinaryExpr

sign64 const #

const sign64 = *ast.BinaryExpr

signalsOK var #

var signalsOK bool

sigprofCallers var #

If the signal handler receives a SIGPROF signal on a non-Go thread, it tries to collect a traceback into sigprofCallers. sigprofCallersUse is set to non-zero while sigprofCallers holds a traceback.

var sigprofCallers cgoCallers

sigprofCallersUse var #

var sigprofCallersUse uint32

sigsetAllExiting var #

sigsetAllExiting is used by sigblock(true) when a thread is exiting.

var sigsetAllExiting = *ast.CallExpr

sigset_all var #

var sigset_all = sigset{...}

sigset_all var #

var sigset_all = sigset{...}

sigset_all var #

var sigset_all = *ast.UnaryExpr

sigset_all var #

var sigset_all = *ast.UnaryExpr

sigset_all var #

var sigset_all = sigset{...}

sigset_all var #

var sigset_all = sigset{...}

sigset_all var #

var sigset_all = sigset{...}

sigset_all var #

var sigset_all = *ast.CallExpr

sigset_all var #

var sigset_all = sigset{...}

sigset_all var #

var sigset_all = sigset{...}

sigset_all var #

var sigset_all = sigset{...}

sigsysIgnored var #

sigsysIgnored is non-zero if we are currently ignoring SIGSYS. See issue #69065.

var sigsysIgnored uint32

sigtable var #

var sigtable = [...]sigTabT{...}

sigtable var #

var sigtable = [...]sigTabT{...}

sigtable var #

var sigtable = [...]sigTabT{...}

sigtable var #

Incoming notes are compared against this table using strncmp, so the order matters: longer patterns must appear before their prefixes. There are _SIG constants in os2_plan9.go for the table index of some of these. If you add entries to this table, you must respect the prefix ordering and also update the constant values in os2_plan9.go.

var sigtable = [...]sigTabT{...}

sigtable var #

var sigtable = [...]sigTabT{...}

sigtable var #

var sigtable = [...]sigTabT{...}

sigtable var #

var sigtable = [...]sigTabT{...}

sigtable var #

var sigtable = [...]sigTabT{...}

sigtable var #

var sigtable = [...]sigTabT{...}

sigtable var #

var sigtable = [...]sigTabT{...}

sigtramp var #

sigtramp is a function descriptor to _sigtramp defined in assembly

var sigtramp funcDescriptor

sizeClassBuckets var #

var sizeClassBuckets []float64

size_to_class128 var #

var size_to_class128 = [*ast.BinaryExpr]uint8{...}

size_to_class8 var #

var size_to_class8 = [*ast.BinaryExpr]uint8{...}

sliceEface var #

var sliceEface any = *ast.CallExpr

sliceType var #

var sliceType *_type = *ast.CallExpr._type

smallSizeDiv const #

const smallSizeDiv = 8

smallSizeMax const #

const smallSizeMax = 1024

sourceBits const #

sourceBits is the number of bits needed to represent a source. 4 bits can hold 16 different sources, which is more than enough. It is set to a low value so the overlapped entry key can contain as many bits as possible for the pollDesc pointer.

const sourceBits = 4

sourceMasks const #

const sourceMasks = *ast.BinaryExpr

spanAllocHeap const #

const spanAllocHeap spanAllocType = iota

spanAllocPtrScalarBits const #

const spanAllocPtrScalarBits

spanAllocStack const #

const spanAllocStack

spanAllocWorkBuf const #

const spanAllocWorkBuf

spanSetBlockEntries const #

const spanSetBlockEntries = 512

spanSetBlockPool var #

spanSetBlockPool is a global pool of spanSetBlocks.

var spanSetBlockPool spanSetBlockAlloc

spanSetInitSpineCap const #

const spanSetInitSpineCap = 256

stackDebug const #

stackDebug == 0: no logging == 1: logging of per-stack operations == 2: logging of per-frame operations == 3: logging of per-word updates == 4: logging of per-word reads

const stackDebug = 0

stackFaultOnFree const #

const stackFaultOnFree = 0

stackForceMove const #

Force a stack movement. Used for debugging. 0xfffffeed in hex.

const stackForceMove = *ast.BinaryExpr

stackFork const #

Thread is forking. Causes a split stack check failure. 0xfffffb2e in hex.

const stackFork = *ast.BinaryExpr

stackFromSystem const #

const stackFromSystem = 0

stackGuard const #

The stack guard is a pointer this many bytes above the bottom of the stack. The guard leaves enough room for a stackNosplit chain of NOSPLIT calls plus one stackSmall frame plus stackSystem bytes for the OS. This arithmetic must match that in cmd/internal/objabi/stack.go:StackLimit.

const stackGuard = *ast.BinaryExpr

stackLarge var #

Global pool of large stack spans.

var stackLarge struct{...}

stackMin const #

The minimum size of stack used by Go code

const stackMin = 2048

stackNoCache const #

const stackNoCache = 0

stackNosplit const #

stackNosplit is the maximum number of bytes that a chain of NOSPLIT functions can use. This arithmetic must match that in cmd/internal/objabi/stack.go:StackNosplit.

const stackNosplit = *ast.BinaryExpr

stackPoisonCopy var #

var stackPoisonCopy = 0

stackPoisonMin const #

stackPoisonMin is the lowest allowed stack poison value.

const stackPoisonMin = *ast.BinaryExpr

stackPreempt const #

Goroutine preemption request. 0xfffffade in hex.

const stackPreempt = *ast.BinaryExpr

stackSystem const #

stackSystem is a number of additional bytes to add to each stack below the usual guard area for OS-specific purposes like signal handling. Used on Windows, Plan 9, and iOS because they do not use a separate stack.

const stackSystem = *ast.BinaryExpr

stackTraceDebug const #

const stackTraceDebug = false

stackpool var #

Global pool of spans that have free stacks. Stacks are assigned an order according to size. order = log_2(size/FixedStack) There is a free list for each order.

var stackpool [_NumStackOrders]struct{...}

startingScavSleepRatio const #

It doesn't really matter what value we start at, but we can't be zero, because that'll cause divide-by-zero issues. Pick something conservative which we'll also use as a fallback.

const startingScavSleepRatio = 0.001

startingStackSize var #

startingStackSize is the amount of stack that new goroutines start with. It is a power of 2, and between fixedStack and maxstacksize, inclusive. startingStackSize is updated every GC by tracking the average size of stacks scanned during the GC.

var startingStackSize uint32 = fixedStack

starttime var #

var starttime int64

startupRand var #

OS-specific startup can set startupRand if the OS passes random data to the process at startup time. For example Linux passes 16 bytes in the auxv vector.

var startupRand []byte

staticLockRanking const #

const staticLockRanking = true

staticLockRanking const #

const staticLockRanking = false

staticuint64s var #

staticuint64s is used to avoid allocating in convTx for small integer values. staticuint64s[0] == 0, staticuint64s[1] == 1, and so forth. It is defined in assembler code so that it is read-only.

var staticuint64s [256]uint64

stealOrder var #

var stealOrder randomOrder

stopTheWorldContext var #

Temporary variable for stopTheWorld, when it can't write to the stack. Protected by worldsema.

var stopTheWorldContext worldStop

stringEface var #

var stringEface any = *ast.CallExpr

stringType var #

var stringType *_type = *ast.CallExpr._type

stwAllGoroutinesStack const #

Reasons to stop-the-world. Avoid reusing reasons and add new ones instead.

const stwAllGoroutinesStack

stwAllThreadsSyscall const #

Reasons to stop-the-world. Avoid reusing reasons and add new ones instead.

const stwAllThreadsSyscall

stwForTestCountPagesInUse const #

Reasons to stop-the-world. Avoid reusing reasons and add new ones instead.

const stwForTestCountPagesInUse

stwForTestPageCachePagesLeaked const #

Reasons to stop-the-world. Avoid reusing reasons and add new ones instead.

const stwForTestPageCachePagesLeaked

stwForTestReadMemStatsSlow const #

Reasons to stop-the-world. Avoid reusing reasons and add new ones instead.

const stwForTestReadMemStatsSlow

stwForTestReadMetricsSlow const #

Reasons to stop-the-world. Avoid reusing reasons and add new ones instead.

const stwForTestReadMetricsSlow

stwForTestResetDebugLog const #

Reasons to stop-the-world. Avoid reusing reasons and add new ones instead.

const stwForTestResetDebugLog

stwGCMarkTerm const #

Reasons to stop-the-world. Avoid reusing reasons and add new ones instead.

const stwGCMarkTerm

stwGCSweepTerm const #

Reasons to stop-the-world. Avoid reusing reasons and add new ones instead.

const stwGCSweepTerm

stwGOMAXPROCS const #

Reasons to stop-the-world. Avoid reusing reasons and add new ones instead.

const stwGOMAXPROCS

stwGoroutineProfile const #

Reasons to stop-the-world. Avoid reusing reasons and add new ones instead.

const stwGoroutineProfile

stwGoroutineProfileCleanup const #

Reasons to stop-the-world. Avoid reusing reasons and add new ones instead.

const stwGoroutineProfileCleanup

stwReadMemStats const #

Reasons to stop-the-world. Avoid reusing reasons and add new ones instead.

const stwReadMemStats

stwReasonStrings var #

If you add to this list, also add it to src/internal/trace/parser.go. If you change the values of any of the stw* constants, bump the trace version number and make a copy of this.

var stwReasonStrings = [...]string{...}

stwStartTrace const #

Reasons to stop-the-world. Avoid reusing reasons and add new ones instead.

const stwStartTrace

stwStopTrace const #

Reasons to stop-the-world. Avoid reusing reasons and add new ones instead.

const stwStopTrace

stwUnknown const #

Reasons to stop-the-world. Avoid reusing reasons and add new ones instead.

const stwUnknown stwReason = iota

stwWriteHeapDump const #

Reasons to stop-the-world. Avoid reusing reasons and add new ones instead.

const stwWriteHeapDump

subs var #

var subs []subscription

subscriptionClockAbstime const #

const subscriptionClockAbstime subclockflags = *ast.BinaryExpr

summaryL0Bits const #

const summaryL0Bits = *ast.BinaryExpr

summaryLevelBits const #

The number of radix bits for each level. The value of 3 is chosen such that the block of summaries we need to scan at each level fits in 64 bytes (2^3 summaries * 8 bytes per summary), which is close to the L1 cache line width on many systems. Also, a value of 3 fits 4 tree levels perfectly into the 21-bit pallocBits summary field at the root level. The following equation explains how each of the constants relate: summaryL0Bits + (summaryLevels-1)*summaryLevelBits + logPallocChunkBytes = heapAddrBits summaryLevels is an architecture-dependent value defined in mpagealloc_*.go.

const summaryLevelBits = 3

summaryLevels const #

The number of levels in the radix tree.

const summaryLevels = 4

summaryLevels const #

The number of levels in the radix tree.

const summaryLevels = 5

surrogateMax const #

Code points in the surrogate range are not valid for UTF-8.

const surrogateMax = 0xDFFF

surrogateMin const #

Code points in the surrogate range are not valid for UTF-8.

const surrogateMin = 0xD800

suspendLock var #

suspendLock protects simultaneous SuspendThread operations from suspending each other.

var suspendLock mutex

sweep var #

var sweep sweepdata

sweepClassDone const #

const sweepClassDone sweepClass = *ast.CallExpr

sweepDrainedMask const #

const sweepDrainedMask = *ast.BinaryExpr

sweepMinHeapDistance const #

sweepMinHeapDistance is a lower bound on the heap distance (in bytes) reserved for concurrent sweeping between GC cycles.

const sweepMinHeapDistance = *ast.BinaryExpr

sysDirectory var #

var sysDirectory [*ast.BinaryExpr]byte

sysDirectoryLen var #

var sysDirectoryLen uintptr

sysStatsDep const #

const sysStatsDep

sysTHPSizePath var #

var sysTHPSizePath = *ast.CallExpr

sysstat var #

var sysstat = *ast.CallExpr

t1 const #

const t1 = 0x00

t2 const #

const t2 = 0xC0

t3 const #

const t3 = 0xE0

t4 const #

const t4 = 0xF0

t5 const #

const t5 = 0xF8

tagAllocSample const #

const tagAllocSample = 17

tagBSS const #

const tagBSS = 13

tagBits const #

In addition to the 16 bits taken from the top, we can take 3 from the bottom, because node must be pointer-aligned, giving a total of 19 bits of count.

const tagBits = *ast.BinaryExpr

tagData const #

const tagData = 12

tagDefer const #

const tagDefer = 14

tagEOF const #

const tagEOF = 0

tagFinalizer const #

const tagFinalizer = 7

tagGoroutine const #

const tagGoroutine = 4

tagItab const #

const tagItab = 8

tagMemProf const #

const tagMemProf = 16

tagMemStats const #

const tagMemStats = 10

tagOSThread const #

const tagOSThread = 9

tagObject const #

const tagObject = 1

tagOtherRoot const #

const tagOtherRoot = 2

tagPanic const #

const tagPanic = 15

tagParams const #

const tagParams = 6

tagQueuedFinalizer const #

const tagQueuedFinalizer = 11

tagStackFrame const #

const tagStackFrame = 5

tagType const #

const tagType = 3

taggedPointerBits const #

The number of bits stored in the numeric tag of a taggedPointer

const taggedPointerBits = *ast.BinaryExpr

taggedPointerBits const #

The number of bits stored in the numeric tag of a taggedPointer

const taggedPointerBits = 32

testSigtrap var #

testSigtrap and testSigusr1 are used by the runtime tests. If non-nil, the corresponding function is called on SIGTRAP/SIGUSR1. If it returns true, the normal behavior on this signal is suppressed.

var testSigtrap func(info *siginfo, ctxt *sigctxt, gp *g) bool

testSigusr1 var #

var testSigusr1 func(gp *g) bool

testSmallBuf const #

testSmallBuf forces a small write barrier buffer to stress write barrier flushing.

const testSmallBuf = false

test_x64 var #

TODO: These should be locals in testAtomic64, but we don't 8-byte align stack variables on 386.

var test_x64 uint64

test_z64 var #

TODO: These should be locals in testAtomic64, but we don't 8-byte align stack variables on 386.

var test_z64 uint64

threadStackSize const #

const threadStackSize = 0x100000

throwTypeNone const #

throwTypeNone means that we are not throwing.

const throwTypeNone throwType = iota

throwTypeRuntime const #

throwTypeRuntime is a throw due to a problem with Go itself. These throws include as much information as possible to aid in debugging the runtime, including runtime frames, system goroutines, and frame metadata.

const throwTypeRuntime

throwTypeUser const #

throwTypeUser is a throw due to a problem with the application. These throws do not include runtime frames, system goroutines, or frame metadata.

const throwTypeUser

ticks var #

var ticks ticksType

timeBeginPeriodRetValue var #

var timeBeginPeriodRetValue uint32

timeHistBuckets var #

var timeHistBuckets []float64

timeHistMaxBucketBits const #

const timeHistMaxBucketBits = 48

timeHistMinBucketBits const #

For the time histogram type, we use an HDR histogram. Values are placed in buckets based solely on the most significant set bit. Thus, buckets are power-of-2 sized. Values are then placed into sub-buckets based on the value of the next timeHistSubBucketBits most significant bits. Thus, sub-buckets are linear within a bucket. Therefore, the number of sub-buckets (timeHistNumSubBuckets) defines the error. This error may be computed as 1/timeHistNumSubBuckets*100%. For example, for 16 sub-buckets per bucket the error is approximately 6%. The number of buckets (timeHistNumBuckets), on the other hand, defines the range. To avoid producing a large number of buckets that are close together, especially for small numbers (e.g. 1, 2, 3, 4, 5 ns) that aren't very useful, timeHistNumBuckets is defined in terms of the least significant bit (timeHistMinBucketBits) that needs to be set before we start bucketing and the most significant bit (timeHistMaxBucketBits) that we bucket before we just dump it into a catch-all bucket. As an example, consider the configuration: timeHistMinBucketBits = 9 timeHistMaxBucketBits = 48 timeHistSubBucketBits = 2 Then: 011000001 ^-- │ ^ │ └---- Next 2 bits -> sub-bucket 3 └------- Bit 9 unset -> bucket 0 110000001 ^-- │ ^ │ └---- Next 2 bits -> sub-bucket 2 └------- Bit 9 set -> bucket 1 1000000010 ^-- ^ │ ^ └-- Lower bits ignored │ └---- Next 2 bits -> sub-bucket 0 └------- Bit 10 set -> bucket 2 Following this pattern, bucket 38 will have the bit 46 set. We don't have any buckets for higher values, so we spill the rest into an overflow bucket containing values of 2^47-1 nanoseconds or approx. 1 day or more. This range is more than enough to handle durations produced by the runtime.

const timeHistMinBucketBits = 9

timeHistNumBuckets const #

const timeHistNumBuckets = *ast.BinaryExpr

timeHistNumSubBuckets const #

const timeHistNumSubBuckets = *ast.BinaryExpr

timeHistSubBucketBits const #

const timeHistSubBucketBits = 2

timeHistTotalBuckets const #

Two extra buckets, one for underflow, one for overflow.

const timeHistTotalBuckets = *ast.BinaryExpr

timekeepSharedPage var #

var timekeepSharedPage *vdsoTimekeep

timerDebug const #

timerDebug enables printing a textual debug trace of all timer operations to stderr.

const timerDebug = false

timerHeapN const #

const timerHeapN = 4

timerHeaped const #

timerHeaped is set when the timer is stored in some P's heap.

const timerHeaped uint8 = *ast.BinaryExpr

timerModified const #

timerModified is set when t.when has been modified but the heap's heap[i].when entry still needs to be updated. That change waits until the heap in which the timer appears can be locked and rearranged. timerModified is only set when timerHeaped is also set.

const timerModified

timerZombie const #

timerZombie is set when the timer has been stopped but is still present in some P's heap. Only set when timerHeaped is also set. It is possible for timerModified and timerZombie to both be set, meaning that the timer was modified and then stopped. A timer sending to a channel may be placed in timerZombie to take it out of the heap even though the timer is not stopped, as long as nothing is reading from the channel.

const timerZombie

timerpMask var #

Bitmask of Ps that may have a timer, one bit per P. Reads and writes must be atomic. Length may change at safe points. Ideally, the timer mask would be kept immediately consistent on any timer operations. Unfortunately, updating a shared global data structure in the timer hot path adds too much overhead in applications frequently switching between no timers and some timers. As a compromise, the timer mask is updated only on pidleget / pidleput. A running P (returned by pidleget) may add a timer at any time, so its mask must be set. An idle P (passed to pidleput) cannot add new timers while idle, so if it has no timers at that time, its mask may be cleared. Thus, we get the following effects on timer-stealing in findrunnable: - Idle Ps with no timers when they go idle are never checked in findrunnable (for work- or timer-stealing; this is the ideal case). - Running Ps must always be checked. - Idle Ps whose timers are stolen must continue to be checked until they run again, even after timer expiration. When the P starts running again, the mask should be set, as a timer may be added at any time. TODO(prattmic): Additional targeted updates may improve the above cases. e.g., updating the mask when stealing a timer.

var timerpMask pMask

tinySizeClass const #

const tinySizeClass = _TinySizeClass

tinySpanClass const #

const tinySpanClass = *ast.CallExpr

tlsSize const #

const tlsSize = *ast.BinaryExpr

tlsSlots const #

tlsSlots is the number of pointer-sized slots reserved for TLS on some platforms, like Windows.

const tlsSlots = 6

tmpStringBufSize const #

The constant is known to the compiler. There is no fundamental theory behind this number.

const tmpStringBufSize = 32

tmpbuf var #

var tmpbuf []byte

trace var #

trace is global tracing context.

var trace struct{...}

traceAdvanceSema var #

var traceAdvanceSema uint32 = 1

traceAdvancer var #

Trace advancer goroutine.

var traceAdvancer traceAdvancerState

traceAllocFreeInfoBatch const #

Batch type values for the alloc/free experiment.

const traceAllocFreeInfoBatch

traceAllocFreeTypesBatch const #

Batch type values for the alloc/free experiment.

const traceAllocFreeTypesBatch = iota

traceBlockChanRecv const #

const traceBlockChanRecv

traceBlockChanSend const #

const traceBlockChanSend

traceBlockCondWait const #

const traceBlockCondWait

traceBlockDebugCall const #

const traceBlockDebugCall

traceBlockForever const #

const traceBlockForever

traceBlockGCMarkAssist const #

const traceBlockGCMarkAssist

traceBlockGCSweep const #

const traceBlockGCSweep

traceBlockGCWeakToStrongWait const #

const traceBlockGCWeakToStrongWait

traceBlockGeneric const #

const traceBlockGeneric traceBlockReason = iota

traceBlockNet const #

const traceBlockNet

traceBlockPreempted const #

const traceBlockPreempted

traceBlockReasonStrings var #

var traceBlockReasonStrings = [...]string{...}

traceBlockSelect const #

const traceBlockSelect

traceBlockSleep const #

const traceBlockSleep

traceBlockSync const #

const traceBlockSync

traceBlockSynctest const #

const traceBlockSynctest

traceBlockSystemGoroutine const #

const traceBlockSystemGoroutine

traceBlockUntilGCEnds const #

const traceBlockUntilGCEnds

traceBytesPerNumber const #

Maximum number of bytes required to encode uint64 in base-128.

const traceBytesPerNumber = 10

traceEvCPUSample const #

const traceEvCPUSample

traceEvCPUSamples const #

const traceEvCPUSamples

traceEvEventBatch const #

Structural events.

const traceEvEventBatch

traceEvExperimentalBatch const #

Batch event for an experimental batch with a custom format.

const traceEvExperimentalBatch

traceEvFrequency const #

const traceEvFrequency

traceEvGCActive const #

GC events.

const traceEvGCActive

traceEvGCBegin const #

const traceEvGCBegin

traceEvGCEnd const #

const traceEvGCEnd

traceEvGCMarkAssistActive const #

const traceEvGCMarkAssistActive

traceEvGCMarkAssistBegin const #

const traceEvGCMarkAssistBegin

traceEvGCMarkAssistEnd const #

const traceEvGCMarkAssistEnd

traceEvGCSweepActive const #

const traceEvGCSweepActive

traceEvGCSweepBegin const #

const traceEvGCSweepBegin

traceEvGCSweepEnd const #

const traceEvGCSweepEnd

traceEvGoBlock const #

const traceEvGoBlock

traceEvGoCreate const #

Goroutines.

const traceEvGoCreate

traceEvGoCreateBlocked const #

const traceEvGoCreateBlocked

traceEvGoCreateSyscall const #

const traceEvGoCreateSyscall

traceEvGoDestroy const #

const traceEvGoDestroy

traceEvGoDestroySyscall const #

const traceEvGoDestroySyscall

traceEvGoLabel const #

Annotations.

const traceEvGoLabel

traceEvGoStart const #

const traceEvGoStart

traceEvGoStatus const #

const traceEvGoStatus

traceEvGoStatusStack const #

GoStatus with stack.

const traceEvGoStatusStack

traceEvGoStop const #

const traceEvGoStop

traceEvGoSwitch const #

Coroutines.

const traceEvGoSwitch

traceEvGoSwitchDestroy const #

const traceEvGoSwitchDestroy

traceEvGoSyscallBegin const #

const traceEvGoSyscallBegin

traceEvGoSyscallEnd const #

const traceEvGoSyscallEnd

traceEvGoSyscallEndBlocked const #

const traceEvGoSyscallEndBlocked

traceEvGoUnblock const #

const traceEvGoUnblock

traceEvGoroutineStack const #

Experimental goroutine stack events. IDs map reversibly to addresses.

const traceEvGoroutineStack

traceEvGoroutineStackAlloc const #

Experimental events.

const traceEvGoroutineStackAlloc

traceEvGoroutineStackFree const #

Experimental events.

const traceEvGoroutineStackFree

traceEvHeapAlloc const #

const traceEvHeapAlloc

traceEvHeapGoal const #

const traceEvHeapGoal

traceEvHeapObject const #

Experimental heap object events. IDs map reversibly to addresses.

const traceEvHeapObject

traceEvHeapObjectAlloc const #

Experimental events.

const traceEvHeapObjectAlloc

traceEvHeapObjectFree const #

Experimental events.

const traceEvHeapObjectFree

traceEvNone const #

const traceEvNone traceEv = iota

traceEvProcStart const #

const traceEvProcStart

traceEvProcStatus const #

const traceEvProcStatus

traceEvProcSteal const #

const traceEvProcSteal

traceEvProcStop const #

const traceEvProcStop

traceEvProcsChange const #

Procs.

const traceEvProcsChange

traceEvSTWBegin const #

STW.

const traceEvSTWBegin

traceEvSTWEnd const #

const traceEvSTWEnd

traceEvSpan const #

Experimental heap span events. IDs map reversibly to base addresses.

const traceEvSpan

traceEvSpanAlloc const #

Experimental events.

const traceEvSpanAlloc

traceEvSpanFree const #

Experimental events.

const traceEvSpanFree

traceEvStack const #

const traceEvStack

traceEvStacks const #

const traceEvStacks

traceEvString const #

const traceEvString

traceEvStrings const #

const traceEvStrings

traceEvUserLog const #

const traceEvUserLog

traceEvUserRegionBegin const #

const traceEvUserRegionBegin

traceEvUserRegionEnd const #

const traceEvUserRegionEnd

traceEvUserTaskBegin const #

const traceEvUserTaskBegin

traceEvUserTaskEnd const #

const traceEvUserTaskEnd

traceExperimentAllocFree const #

traceExperimentAllocFree is an experiment to add alloc/free events to the trace.

const traceExperimentAllocFree

traceGoBad const #

const traceGoBad traceGoStatus = iota

traceGoRunnable const #

const traceGoRunnable

traceGoRunning const #

const traceGoRunning

traceGoStopGeneric const #

const traceGoStopGeneric traceGoStopReason = iota

traceGoStopGoSched const #

const traceGoStopGoSched

traceGoStopPreempted const #

const traceGoStopPreempted

traceGoStopReasonStrings var #

var traceGoStopReasonStrings = [...]string{...}

traceGoSyscall const #

const traceGoSyscall

traceGoWaiting const #

const traceGoWaiting

traceNoExperiment const #

traceNoExperiment indicates no experiment.

const traceNoExperiment traceExperiment = iota

traceNumExperiments const #

traceNumExperiments is the number of trace experiments (and 1 higher than the highest numbered experiment).

const traceNumExperiments

traceProcBad const #

const traceProcBad traceProcStatus = iota

traceProcIdle const #

const traceProcIdle

traceProcRunning const #

const traceProcRunning

traceProcSyscall const #

const traceProcSyscall

traceProcSyscallAbandoned const #

traceProcSyscallAbandoned is a special case of traceProcSyscall. It's used in the very specific case where the first time a P is mentioned in a generation is as part of a ProcSteal event. If that's the first time it's mentioned, then there's no GoSyscallBegin to connect the P stealing back to at that point. This special state indicates this to the parser, so it doesn't try to find a GoSyscallEndBlocked that corresponds with the ProcSteal.

const traceProcSyscallAbandoned

traceRegionAllocBlockData const #

const traceRegionAllocBlockData = *ast.BinaryExpr

traceShutdownSema var #

var traceShutdownSema uint32 = 1

traceStackSize const #

Maximum number of PCs in a single stack trace. Since events contain only stack id rather than whole stack trace, we can allow quite large values here.

const traceStackSize = 128

traceTimeDiv const #

Timestamps in trace are produced through either nanotime or cputicks and divided by traceTimeDiv. nanotime is used everywhere except on platforms where osHasLowResClock is true, because the system clock isn't granular enough to get useful information out of a trace in many cases. This makes absolute values of timestamp diffs smaller, and so they are encoded in fewer bytes. The target resolution in all cases is 64 nanoseconds. This is based on the fact that fundamentally the execution tracer won't emit events more frequently than roughly every 200 ns or so, because that's roughly how long it takes to call through the scheduler. We could be more aggressive and bump this up to 128 ns while still getting useful data, but the extra bit doesn't save us that much and the headroom is nice to have. Hitting this target resolution is easy in the nanotime case: just pick a division of 64. In the cputicks case it's a bit more complex. For x86, on a 3 GHz machine, we'd want to divide by 3*64 to hit our target. To keep the division operation efficient, we round that up to 4*64, or 256. Given what cputicks represents, we use this on all other platforms except for PowerPC. The suggested increment frequency for PowerPC's time base register is 512 MHz according to Power ISA v2.07 section 6.2, so we use 32 on ppc64 and ppc64le.

const traceTimeDiv = *ast.BinaryExpr

tracebackAll const #

Keep a cached value to make gotraceback fast, since we call it on every call to gentraceback. The cached value is a uint32 in which the low bits are the "crash" and "all" settings and the remaining bits are the traceback value (0 off, 1 on, 2 include system).

const tracebackAll

tracebackCrash const #

Keep a cached value to make gotraceback fast, since we call it on every call to gentraceback. The cached value is a uint32 in which the low bits are the "crash" and "all" settings and the remaining bits are the traceback value (0 off, 1 on, 2 include system).

const tracebackCrash = *ast.BinaryExpr

tracebackInnerFrames const #

tracebackInnerFrames is the number of innermost frames to print in a stack trace. The total maximum frames is tracebackInnerFrames + tracebackOuterFrames.

const tracebackInnerFrames = 50

tracebackOuterFrames const #

tracebackOuterFrames is the number of outermost frames to print in a stack trace.

const tracebackOuterFrames = 50

tracebackShift const #

Keep a cached value to make gotraceback fast, since we call it on every call to gentraceback. The cached value is a uint32 in which the low bits are the "crash" and "all" settings and the remaining bits are the traceback value (0 off, 1 on, 2 include system).

const tracebackShift = iota

traceback_cache var #

var traceback_cache uint32 = *ast.BinaryExpr

traceback_env var #

var traceback_env uint32

triggerRatioDen const #

These constants determine the bounds on the GC trigger as a fraction of heap bytes allocated between the start of a GC (heapLive == heapMarked) and the end of a GC (heapLive == heapGoal). The constants are obscured in this way for efficiency. The denominator of the fraction is always a power-of-two for a quick division, so that the numerator is a single constant integer multiplication.

const triggerRatioDen = 64

tstart var #

tstart is a function descriptor to _tstart defined in assembly.

var tstart funcDescriptor

tx const #

const tx = 0x80

typeCacheAssoc const #

Cache of types that have been serialized already. We use a type's hash field to pick a bucket. Inside a bucket, we keep a list of types that have been serialized so far, most recently used first. Note: when a bucket overflows we may end up serializing a type more than once. That's ok.

const typeCacheAssoc = 4

typeCacheBuckets const #

Cache of types that have been serialized already. We use a type's hash field to pick a bucket. Inside a bucket, we keep a list of types that have been serialized so far, most recently used first. Note: when a bucket overflows we may end up serializing a type more than once. That's ok.

const typeCacheBuckets = 256

typecache var #

var typecache [typeCacheBuckets]typeCacheBucket

uint16Eface var #

var uint16Eface any = *ast.CallExpr

uint16Type var #

var uint16Type *_type = *ast.CallExpr._type

uint32Eface var #

var uint32Eface any = *ast.CallExpr

uint32Type var #

var uint32Type *_type = *ast.CallExpr._type

uint64Eface var #

var uint64Eface any = *ast.CallExpr

uint64Type var #

var uint64Type *_type = *ast.CallExpr._type

uintptrMask const #

const uintptrMask = *ast.BinaryExpr

uniqueMapCleanup var #

var uniqueMapCleanup chan struct{...}

unknown const #

const unknown loggerType = iota

unwindJumpStack const #

unwindJumpStack indicates that, if the traceback is on a system stack, it should resume tracing at the user stack when the system stack is exhausted.

const unwindJumpStack

unwindPrintErrors const #

unwindPrintErrors indicates that if unwinding encounters an error, it should print a message and stop without throwing. This is used for things like stack printing, where it's better to get incomplete information than to crash. This is also used in situations where everything may not be stopped nicely and the stack walk may not be able to complete, such as during profiling signals or during a crash. If neither unwindPrintErrors or unwindSilentErrors are set, unwinding performs extra consistency checks and throws on any error. Note that there are a small number of fatal situations that will throw regardless of unwindPrintErrors or unwindSilentErrors.

const unwindPrintErrors unwindFlags = *ast.BinaryExpr

unwindSilentErrors const #

unwindSilentErrors silently ignores errors during unwinding.

const unwindSilentErrors

unwindTrap const #

unwindTrap indicates that the initial PC and SP are from a trap, not a return PC from a call. The unwindTrap flag is updated during unwinding. If set, frame.pc is the address of a faulting instruction instead of the return address of a call. It also means the liveness at pc may not be known. TODO: Distinguish frame.continpc, which is really the stack map PC, from the actual continuation PC, which is computed differently depending on this flag and a few other things.

const unwindTrap

urandom_dev var #

var urandom_dev = *ast.CallExpr

urandom_dev var #

var urandom_dev = *ast.CallExpr

urandom_dev var #

var urandom_dev = *ast.CallExpr

urandom_dev var #

var urandom_dev = *ast.CallExpr

urandom_dev var #

var urandom_dev = *ast.CallExpr

urandom_dev var #

var urandom_dev = *ast.CallExpr

urandom_dev var #

var urandom_dev = *ast.CallExpr

useAeshash var #

runtime variable to check if the processor we're running on actually supports the instructions used by the AES-based hash implementation.

var useAeshash bool

useCheckmark var #

If useCheckmark is true, marking of an object uses the checkmark bits instead of the standard mark bits.

var useCheckmark = false

userArenaChunkBytes const #

const userArenaChunkBytes = *ast.CallExpr

userArenaChunkBytesMax const #

userArenaChunkBytesMax is an upper bound on userArenaChunkBytes, the size of a user arena chunk.

const userArenaChunkBytesMax = *ast.BinaryExpr

userArenaChunkMaxAllocBytes const #

userArenaChunkMaxAllocBytes is the maximum size of an object that can be allocated from an arena. This number is chosen to cap worst-case fragmentation of user arenas to 25%. Larger allocations are redirected to the heap.

const userArenaChunkMaxAllocBytes = *ast.BinaryExpr

userArenaChunkPages const #

userArenaChunkPages is the number of pages a user arena chunk uses.

const userArenaChunkPages = *ast.BinaryExpr

userArenaState var #

var userArenaState struct{...}

usesLR const #

const usesLR = *ast.BinaryExpr

utf16ConsoleBack var #

var utf16ConsoleBack [1000]uint16

utf16ConsoleBackLock var #

var utf16ConsoleBackLock mutex

vdsoArrayMax const #

vdsoArrayMax is the byte-size of a maximally sized array on this architecture. See cmd/compile/internal/arm64/galign.go arch.MAXWIDTH initialization.

const vdsoArrayMax = *ast.BinaryExpr

vdsoArrayMax const #

vdsoArrayMax is the byte-size of a maximally sized array on this architecture. See cmd/compile/internal/s390x/galign.go arch.MAXWIDTH initialization.

const vdsoArrayMax = *ast.BinaryExpr

vdsoArrayMax const #

vdsoArrayMax is the byte-size of a maximally sized array on this architecture. See cmd/compile/internal/ppc64/galign.go arch.MAXWIDTH initialization.

const vdsoArrayMax = *ast.BinaryExpr

vdsoArrayMax const #

vdsoArrayMax is the byte-size of a maximally sized array on this architecture. See cmd/compile/internal/amd64/galign.go arch.MAXWIDTH initialization.

const vdsoArrayMax = *ast.BinaryExpr

vdsoArrayMax const #

vdsoArrayMax is the byte-size of a maximally sized array on this architecture. See cmd/compile/internal/riscv64/galign.go arch.MAXWIDTH initialization.

const vdsoArrayMax = *ast.BinaryExpr

vdsoArrayMax const #

vdsoArrayMax is the byte-size of a maximally sized array on this architecture. See cmd/compile/internal/x86/galign.go arch.MAXWIDTH initialization, but must also be constrained to max +ve int.

const vdsoArrayMax = *ast.BinaryExpr

vdsoArrayMax const #

vdsoArrayMax is the byte-size of a maximally sized array on this architecture. See cmd/compile/internal/arm/galign.go arch.MAXWIDTH initialization, but must also be constrained to max +ve int.

const vdsoArrayMax = *ast.BinaryExpr

vdsoArrayMax const #

vdsoArrayMax is the byte-size of a maximally sized array on this architecture. See cmd/compile/internal/mips64/galign.go arch.MAXWIDTH initialization.

const vdsoArrayMax = *ast.BinaryExpr

vdsoArrayMax const #

vdsoArrayMax is the byte-size of a maximally sized array on this architecture. See cmd/compile/internal/loong64/galign.go arch.MAXWIDTH initialization.

const vdsoArrayMax = *ast.BinaryExpr

vdsoBloomSizeScale const #

vdsoBloomSizeScale is a scaling factor for gnuhash tables which are uint32 indexed, but contain uintptrs

const vdsoBloomSizeScale = *ast.BinaryExpr

vdsoClockgettimeSym var #

initialize to fall back to syscall

var vdsoClockgettimeSym uintptr = 0

vdsoClockgettimeSym var #

var vdsoClockgettimeSym uintptr

vdsoClockgettimeSym var #

initialize to fall back to syscall

var vdsoClockgettimeSym uintptr = 0

vdsoClockgettimeSym var #

initialize to fall back to syscall

var vdsoClockgettimeSym uintptr = 0

vdsoClockgettimeSym var #

var vdsoClockgettimeSym uintptr

vdsoClockgettimeSym var #

initialize to fall back to syscall

var vdsoClockgettimeSym uintptr = 0

vdsoClockgettimeSym var #

var vdsoClockgettimeSym uintptr

vdsoClockgettimeSym var #

var vdsoClockgettimeSym uintptr

vdsoClockgettimeSym var #

var vdsoClockgettimeSym uintptr

vdsoDynSize const #

const vdsoDynSize = *ast.BinaryExpr

vdsoGetrandomSym var #

var vdsoGetrandomSym uintptr

vdsoGetrandomSym var #

var vdsoGetrandomSym uintptr

vdsoGetrandomSym var #

var vdsoGetrandomSym uintptr

vdsoGetrandomSym var #

var vdsoGetrandomSym uintptr

vdsoGetrandomSym var #

var vdsoGetrandomSym uintptr

vdsoGettimeofdaySym var #

var vdsoGettimeofdaySym uintptr

vdsoHashSize const #

const vdsoHashSize = *ast.BinaryExpr

vdsoLinuxVersion var #

see man 7 vdso : mips

var vdsoLinuxVersion = vdsoVersionKey{...}

vdsoLinuxVersion var #

key and version at man 7 vdso : aarch64

var vdsoLinuxVersion = vdsoVersionKey{...}

vdsoLinuxVersion var #

var vdsoLinuxVersion = vdsoVersionKey{...}

vdsoLinuxVersion var #

var vdsoLinuxVersion = vdsoVersionKey{...}

vdsoLinuxVersion var #

not currently described in manpages as of May 2022, but expected to be documented there eventually; see man 7 vdso : loongarch

var vdsoLinuxVersion = vdsoVersionKey{...}

vdsoLinuxVersion var #

key and version at man 7 vdso : riscv

var vdsoLinuxVersion = vdsoVersionKey{...}

vdsoLinuxVersion var #

var vdsoLinuxVersion = vdsoVersionKey{...}

vdsoLinuxVersion var #

var vdsoLinuxVersion = vdsoVersionKey{...}

vdsoLinuxVersion var #

var vdsoLinuxVersion = vdsoVersionKey{...}

vdsoLoadEnd var #

var vdsoLoadEnd uintptr

vdsoLoadStart var #

var vdsoLoadStart uintptr

vdsoSymStringsSize const #

const vdsoSymStringsSize = vdsoArrayMax

vdsoSymTabSize const #

Maximum indices for the array types used when traversing the vDSO ELF structures. Computed from architecture-specific max provided by vdso_linux_*.go

const vdsoSymTabSize = *ast.BinaryExpr

vdsoSymbolKeys var #

var vdsoSymbolKeys = []vdsoSymbolKey{...}

vdsoSymbolKeys var #

var vdsoSymbolKeys = []vdsoSymbolKey{...}

vdsoSymbolKeys var #

The symbol name is not __kernel_clock_gettime as suggested by the manpage; according to Linux source code it should be __vdso_clock_gettime instead.

var vdsoSymbolKeys = []vdsoSymbolKey{...}

vdsoSymbolKeys var #

var vdsoSymbolKeys = []vdsoSymbolKey{...}

vdsoSymbolKeys var #

var vdsoSymbolKeys = []vdsoSymbolKey{...}

vdsoSymbolKeys var #

var vdsoSymbolKeys = []vdsoSymbolKey{...}

vdsoSymbolKeys var #

var vdsoSymbolKeys = []vdsoSymbolKey{...}

vdsoSymbolKeys var #

var vdsoSymbolKeys = []vdsoSymbolKey{...}

vdsoSymbolKeys var #

var vdsoSymbolKeys = []vdsoSymbolKey{...}

vdsoTimehandsSize const #

const vdsoTimehandsSize = 0x58

vdsoTimehandsSize const #

const vdsoTimehandsSize = C.sizeof_struct_vdso_timehands

vdsoTimehandsSize const #

const vdsoTimehandsSize = 0x58

vdsoTimehandsSize const #

const vdsoTimehandsSize = 0x50

vdsoTimehandsSize const #

const vdsoTimehandsSize = 0x58

vdsoTimehandsSize const #

const vdsoTimehandsSize = 0x58

vdsoTimekeepSize const #

const vdsoTimekeepSize = 0x10

vdsoTimekeepSize const #

const vdsoTimekeepSize = C.sizeof_struct_vdso_timekeep

vdsoTimekeepSize const #

const vdsoTimekeepSize = 0x10

vdsoTimekeepSize const #

const vdsoTimekeepSize = 0x10

vdsoTimekeepSize const #

const vdsoTimekeepSize = 0x10

vdsoTimekeepSize const #

const vdsoTimekeepSize = 0xc

vdsoVerSymSize const #

const vdsoVerSymSize = *ast.BinaryExpr

verifyTimers const #

verifyTimers can be set to true to add debugging checks that the timer heaps are valid.

const verifyTimers = false

vgetrandomAlloc var #

var vgetrandomAlloc struct{...}

waitReasonChanReceive const #

const waitReasonChanReceive

waitReasonChanReceiveNilChan const #

const waitReasonChanReceiveNilChan

waitReasonChanSend const #

const waitReasonChanSend

waitReasonChanSendNilChan const #

const waitReasonChanSendNilChan

waitReasonCoroutine const #

const waitReasonCoroutine

waitReasonDebugCall const #

const waitReasonDebugCall

waitReasonDumpingHeap const #

const waitReasonDumpingHeap

waitReasonFinalizerWait const #

const waitReasonFinalizerWait

waitReasonFlushProcCaches const #

const waitReasonFlushProcCaches

waitReasonForceGCIdle const #

const waitReasonForceGCIdle

waitReasonGCAssistMarking const #

const waitReasonGCAssistMarking

waitReasonGCAssistWait const #

const waitReasonGCAssistWait

waitReasonGCMarkTermination const #

const waitReasonGCMarkTermination

waitReasonGCScavengeWait const #

const waitReasonGCScavengeWait

waitReasonGCSweepWait const #

const waitReasonGCSweepWait

waitReasonGCWeakToStrongWait const #

const waitReasonGCWeakToStrongWait

waitReasonGCWorkerActive const #

const waitReasonGCWorkerActive

waitReasonGCWorkerIdle const #

const waitReasonGCWorkerIdle

waitReasonGarbageCollection const #

const waitReasonGarbageCollection

waitReasonGarbageCollectionScan const #

const waitReasonGarbageCollectionScan

waitReasonIOWait const #

const waitReasonIOWait

waitReasonPageTraceFlush const #

const waitReasonPageTraceFlush

waitReasonPanicWait const #

const waitReasonPanicWait

waitReasonPreempted const #

const waitReasonPreempted

waitReasonSelect const #

const waitReasonSelect

waitReasonSelectNoCases const #

const waitReasonSelectNoCases

waitReasonSemacquire const #

const waitReasonSemacquire

waitReasonSleep const #

const waitReasonSleep

waitReasonStoppingTheWorld const #

const waitReasonStoppingTheWorld

waitReasonStrings var #

var waitReasonStrings = [...]string{...}

waitReasonSyncCondWait const #

const waitReasonSyncCondWait

waitReasonSyncMutexLock const #

const waitReasonSyncMutexLock

waitReasonSyncRWMutexLock const #

const waitReasonSyncRWMutexLock

waitReasonSyncRWMutexRLock const #

const waitReasonSyncRWMutexRLock

waitReasonSyncWaitGroupWait const #

const waitReasonSyncWaitGroupWait

waitReasonSynctestChanReceive const #

const waitReasonSynctestChanReceive

waitReasonSynctestChanSend const #

const waitReasonSynctestChanSend

waitReasonSynctestRun const #

const waitReasonSynctestRun

waitReasonSynctestSelect const #

const waitReasonSynctestSelect

waitReasonSynctestWait const #

const waitReasonSynctestWait

waitReasonTraceGoroutineStatus const #

const waitReasonTraceGoroutineStatus

waitReasonTraceProcStatus const #

const waitReasonTraceProcStatus

waitReasonTraceReaderBlocked const #

const waitReasonTraceReaderBlocked

waitReasonWaitForGCCycle const #

const waitReasonWaitForGCCycle

waitReasonZero const #

const waitReasonZero waitReason = iota

wasmStack var #

var wasmStack m0Stack

wbBufEntries const #

wbBufEntries is the maximum number of pointers that can be stored in the write barrier buffer. This trades latency for throughput amortization. Higher values amortize flushing overhead more, but increase the latency of flushing. Higher values also increase the cache footprint of the buffer. TODO: What is the latency cost of this? Tune this value.

const wbBufEntries = 512

wbMaxEntriesPerCall const #

Maximum number of entries that we need to ask from the buffer in a single call.

const wbMaxEntriesPerCall = 8

winmmdll var #

var winmmdll = [...]uint16{...}

work var #

var work workType

workbufAlloc const #

workbufAlloc is the number of bytes to allocate at a time for new workbufs. This must be a multiple of pageSize and should be a multiple of _WorkbufSize. Larger values reduce workbuf allocation overhead. Smaller values reduce heap fragmentation.

const workbufAlloc = *ast.BinaryExpr

worldIsStopped var #

worldIsStopped is accessed atomically to track world-stops. 1 == world stopped.

var worldIsStopped atomic.Uint32

worldsema var #

Holding worldsema grants an M the right to try to stop the world.

var worldsema uint32 = 1

writeBarrier var #

The compiler knows about this variable. If you change it, you must change builtin/runtime.go, too. If you change the first four bytes, you must also change the write barrier insertion code. writeBarrier should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic Do not remove or change the type signature. See go.dev/issue/67401. go:linkname writeBarrier

var writeBarrier struct{...}

writeBuf var #

var writeBuf [1024]byte

writeFD var #

guarded by printlock/printunlock.

var writeFD uintptr

writeHeader var #

var writeHeader = []byte{...}

writeLogd var #

var writeLogd = *ast.CallExpr

writePath var #

var writePath = *ast.CallExpr

writePos var #

var writePos int

wrwake var #

var wrwake int32

x86HasFMA var #

var x86HasFMA bool

x86HasPOPCNT var #

Set in runtime.cpuinit. TODO: deprecate these; use internal/cpu directly.

var x86HasPOPCNT bool

x86HasSSE41 var #

var x86HasSSE41 bool

xbuckets var #

var xbuckets atomic.UnsafePointer

zeroBintime var #

var zeroBintime bintime

zeroVal var #

zeroVal is used by reflect via linkname. zeroVal should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname zeroVal

var zeroVal [abi.ZeroValSize]byte

zerobase var #

base address for all 0-byte allocations

var zerobase uintptr

Type Aliases

EpollEvent type #

type EpollEvent C.struct_epoll_event

ExceptionState32 type #

type ExceptionState32 C.struct_i386_exception_state

ExceptionState64 type #

type ExceptionState64 C.struct_x86_exception_state64

FPControl type #

type FPControl C.struct_fp_control

FPStatus type #

type FPStatus C.struct_fp_status

FPregset type #

type FPregset C.elf_fpregset_t

FloatState32 type #

type FloatState32 C.struct_i386_float_state

FloatState64 type #

type FloatState64 C.struct_x86_float_state64

Fpreg type #

type Fpreg C.struct__fpreg

Fpreg1 type #

type Fpreg1 C.struct__fpreg

Fpregset type #

type Fpregset C.fpregset_t

Fpstate type #

type Fpstate C.struct__libc_fpstate

Fpstate type #

type Fpstate C.struct__fpstate

Fpstate1 type #

type Fpstate1 C.struct__fpstate

Fpxreg type #

type Fpxreg C.struct__fpxreg

Fpxreg type #

type Fpxreg C.struct__libc_fpxreg

Fpxreg1 type #

type Fpxreg1 C.struct__fpxreg

Gregset type #

type Gregset C.elf_gregset_t

Itimerspec type #

type Itimerspec C.struct_itimerspec

Itimerval type #

type Itimerval C.struct_itimerval

Itimerval type #

type Itimerval C.struct_itimerval

Itimerval type #

type Itimerval C.struct_itimerval

Itimerval type #

type Itimerval C.struct_itimerval

Itimerval type #

type Itimerval C.struct_itimerval

Itimerval type #

type Itimerval C.struct_itimerval

Itimerval type #

type Itimerval C.struct_itimerval

Itimerval type #

type Itimerval C.struct_itimerval

Itimerval type #

type Itimerval C.struct_itimerval

Kevent type #

type Kevent C.struct_kevent

Kevent type #

type Kevent C.struct_kevent

Kevent type #

type Kevent C.struct_kevent

KeventT type #

type KeventT C.struct_kevent

KeventT type #

type KeventT C.struct_kevent

Lwpparams type #

type Lwpparams C.struct_lwp_params

MachMsgTypeNumber type #

type MachMsgTypeNumber C.mach_msg_type_number_t

MachPort type #

type MachPort C.mach_port_t

MachTimebaseInfo type #

type MachTimebaseInfo C.mach_timebase_info_data_t

MachVMAddress type #

type MachVMAddress C.mach_vm_address_t

MachVMMapRead type #

type MachVMMapRead C.vm_map_read_t

MachVMRegionFlavour type #

type MachVMRegionFlavour C.vm_region_flavor_t

MachVMRegionInfo type #

type MachVMRegionInfo C.vm_region_info_t

MachVMSize type #

type MachVMSize C.mach_vm_size_t

Mcontext type #

type Mcontext C.mcontext_t

Mcontext type #

type Mcontext C.mcontext_t

Mcontext type #

type Mcontext C.mcontext_t

Mcontext type #

type Mcontext C.mcontext_t

Mcontext32 type #

type Mcontext32 C.struct_mcontext32

Mcontext64 type #

type Mcontext64 C.struct_mcontext64

McontextT type #

type McontextT C.mcontext_t

PortEvent type #

type PortEvent C.port_event_t

Pthread type #

type Pthread C.pthread_t

Pthread type #

type Pthread C.pthread_t

Pthread type #

type Pthread C.pthread_t

PthreadAttr type #

type PthreadAttr C.pthread_attr_t

PthreadAttr type #

type PthreadAttr C.pthread_attr_t

PthreadAttr type #

type PthreadAttr C.pthread_attr_t

PthreadCond type #

type PthreadCond C.pthread_cond_t

PthreadCond type #

type PthreadCond C.pthread_cond_t

PthreadCondAttr type #

type PthreadCondAttr C.pthread_condattr_t

PthreadCondAttr type #

type PthreadCondAttr C.pthread_condattr_t

PthreadMutex type #

type PthreadMutex C.pthread_mutex_t

PthreadMutex type #

type PthreadMutex C.pthread_mutex_t

PthreadMutexAttr type #

type PthreadMutexAttr C.pthread_mutexattr_t

PthreadMutexAttr type #

type PthreadMutexAttr C.pthread_mutexattr_t

Ptregs type #

types used in sigcontext

type Ptregs C.struct_pt_regs

RegMMST type #

type RegMMST C.struct_mmst_reg

RegXMM type #

type RegXMM C.struct_xmm_reg

Regs32 type #

type Regs32 C.struct_i386_thread_state

Regs64 type #

type Regs64 C.struct_x86_thread_state64

Rtprio type #

type Rtprio C.struct_rtprio

Rtprio type #

type Rtprio C.struct_rtprio

SemT type #

type SemT C.sem_t

Sigaction type #

type Sigaction C.struct___sigaction

Sigaction type #

type Sigaction C.struct_xsigaction

Sigaction type #

type Sigaction C.struct_sigaction

Sigaction type #

type Sigaction C.struct_kernel_sigaction

Sigaction type #

type Sigaction C.struct_sigaction

Sigcontext type #

type Sigcontext C.struct_sigcontext

Sigcontext type #

PPC64 uses sigcontext in place of mcontext in ucontext. see https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/arch/powerpc/include/uapi/asm/ucontext.h

type Sigcontext C.struct_sigcontext

Sigcontext type #

type Sigcontext C.struct_sigcontext

Sigcontext type #

type Sigcontext C.struct_sigcontext

Sigcontext type #

type Sigcontext C.struct_sigcontext

Sigevent type #

type Sigevent C.struct_sigevent

Sighandler type #

type Sighandler C.union___sigaction_u

Siginfo type #

type Siginfo C.siginfo_t

Siginfo type #

type Siginfo C.siginfo_t

Siginfo type #

type Siginfo C.siginfo_t

Siginfo type #

type Siginfo C.struct_xsiginfo

Siginfo type #

type Siginfo C.siginfo_t

Siginfo type #

type Siginfo C.struct__ksiginfo

Siginfo type #

type Siginfo C.siginfo_t

Siginfo type #

type Siginfo C.siginfo_t

Siginfo type #

type Siginfo C.siginfo_t

Sigset type #

type Sigset C.sigset_t

Sigset type #

type Sigset C.sigset_t

Sigset type #

type Sigset C.sigset_t

Sigset type #

type Sigset C.sigset_t

Sigset type #

type Sigset C.sigset_t

Sigset type #

type Sigset C.struct___sigset

Sigset type #

type Sigset C.struct___sigset

Sigval type #

type Sigval C.union_sigval

Sigval type #

type Sigval C.union_sigval

StackT type #

type StackT C.stack_t

StackT type #

type StackT C.stack_t

StackT type #

type StackT C.stack_t

StackT type #

type StackT C.stack_t

StackT type #

type StackT C.struct_sigaltstack

StackT type #

type StackT C.stack_t

StackT type #

type StackT C.stack_t

StackT type #

type StackT C.stack_t

StackT type #

type StackT C.stack_t

StackT type #

type StackT C.stack_t

Stat type #

depends on Timespec, must appear below

type Stat C.struct_stat

TforkT type #

type TforkT C.struct___tfork

ThrParam type #

type ThrParam C.struct_thr_param

Timespec type #

type Timespec C.struct_timespec

Timespec type #

type Timespec C.struct_timespec

Timespec type #

type Timespec C.struct_timespec

Timespec type #

type Timespec C.struct_timespec

Timespec type #

type Timespec C.struct_timespec

Timespec type #

type Timespec C.struct_timespec

Timespec type #

type Timespec C.struct_timespec

Timespec type #

type Timespec C.struct_timespec

Timespec type #

type Timespec C.struct_timespec

Timeval type #

type Timeval C.struct_timeval

Timeval type #

type Timeval C.struct_timeval

Timeval type #

type Timeval C.struct_timeval

Timeval type #

type Timeval C.struct_timeval

Timeval type #

type Timeval C.struct_timeval

Timeval type #

type Timeval C.struct_timeval

Timeval type #

type Timeval C.struct_timeval

Timeval type #

type Timeval C.struct_timeval

Timeval type #

type Timeval C.struct_timeval

Ucontext type #

type Ucontext C.struct_ucontext

Ucontext type #

type Ucontext C.ucontext_t

Ucontext type #

type Ucontext C.ucontext_t

Ucontext type #

type Ucontext C.struct_ucontext

Ucontext type #

type Ucontext C.ucontext_t

Ucontext type #

type Ucontext C.struct_ucontext

Ucontext type #

type Ucontext C.struct_ucontext

Ucontext type #

type Ucontext C.ucontext_t

UcontextT type #

type UcontextT C.ucontext_t

Umtx_time type #

type Umtx_time C.struct__umtx_time

Usigaction type #

type Usigaction C.struct_sigaction

Usigset type #

type Usigset C.__sigset_t

Usigset type #

type Usigset C.__sigset_t

Vreg type #

type Vreg C.elf_vrreg_t

Xmmreg type #

type Xmmreg C.struct__libc_xmmreg

Xmmreg type #

type Xmmreg C.struct__xmmreg

Xmmreg1 type #

type Xmmreg1 C.struct__xmmreg

_Ctype_struct___extctx type #

type _Ctype_struct___extctx uint64

_type type #

type _type abi.Type

abiPartKind type #

abiPartKind is the action an abiPart should take.

type abiPartKind int

arenaIdx type #

type arenaIdx uint

arraytype type #

type arraytype abi.ArrayType

bintime type #

type bintime C.struct_bintime

boundsErrorCode type #

type boundsErrorCode uint8

bucketType type #

type bucketType int

buckhashArray type #

type buckhashArray [buckHashSize]atomic.UnsafePointer

cgoCallers type #

Addresses collected in a cgo backtrace when crashing. Length must match arg.Max in x_cgo_callers in runtime/cgo/gcc_traceback.c.

type cgoCallers [32]uintptr

chantype type #

type chantype abi.ChanType

chunkIdx type #

Global chunk index. Represents an index into the leaf level of the radix tree. Similar to arenaIndex, except instead of arenas, it divides the address space into chunks.

type chunkIdx uint

clockid type #

https://github.com/WebAssembly/WASI/blob/a2b96e81c0586125cc4dc79a5be0b78d9a059925/legacy/preview1/docs.md#-clockid-variant

type clockid uint32

context64 type #

type context64 C.struct___context64

dlogger type #

type dlogger dloggerFake

dlogger type #

dlogger is the underlying implementation of the dlogger interface, selected at build time. We use a type alias instead of struct embedding so that the dlogger type is identical to the type returned by method chaining on the methods of this type.

type dlogger *dloggerImpl

errno type #

https://github.com/WebAssembly/WASI/blob/a2b96e81c0586125cc4dc79a5be0b78d9a059925/legacy/preview1/docs.md#-errno-variant

type errno uint32

errorString type #

An errorString represents a runtime error described by a single string.

type errorString string

eventrwflags type #

type eventrwflags uint16

eventtype type #

type eventtype uint8

filesize type #

https://github.com/WebAssembly/WASI/blob/a2b96e81c0586125cc4dc79a5be0b78d9a059925/legacy/preview1/docs.md#-filesize-u64

type filesize uint64

functype type #

type functype abi.FuncType

gcDrainFlags type #

type gcDrainFlags int

gcMarkWorkerMode type #

gcMarkWorkerMode represents the mode that a concurrent mark worker should operate in. Concurrent marking happens through four different mechanisms. One is mutator assists, which happen in response to allocations and are not scheduled. The other three are variations in the per-P mark workers and are distinguished by gcMarkWorkerMode.

type gcMarkWorkerMode int

gcMode type #

gcMode indicates how concurrent a GC cycle should be.

type gcMode int

gcTriggerKind type #

type gcTriggerKind int

gclinkptr type #

A gclinkptr is a pointer to a gclink, but it is opaque to the garbage collector.

type gclinkptr uintptr

goroutineProfileState type #

goroutineProfileState indicates the status of a goroutine's stack for the current in-progress goroutine profile. Goroutines' stacks are initially "Absent" from the profile, and end up "Satisfied" by the time the profile is complete. While a goroutine's stack is being captured, its goroutineProfileState will be "InProgress" and it will not be able to run until the capture completes and the state moves to "Satisfied". Some goroutines (the finalizer goroutine, which at various times can be either a "system" or a "user" goroutine, the goroutine that is coordinating the profile, and any goroutines created during the profile) move directly to the "Satisfied" state.

type goroutineProfileState uint32

goroutineProfileStateHolder type #

type goroutineProfileStateHolder atomic.Uint32

guintptr type #

A guintptr holds a goroutine pointer, but typed as a uintptr to bypass write barriers. It is used in the Gobuf goroutine state and in scheduling lists that are manipulated without a P. The Gobuf.g goroutine pointer is almost always updated by assembly code. In one of the few places it is updated by Go code - func save - it must be treated as a uintptr to avoid a write barrier being emitted at a bad time. Instead of figuring out how to emit the write barriers missing in the assembly manipulation, we change the type of the field to uintptr, so that it does not require write barriers at all. Goroutine structs are published in the allg list and never freed. That will keep the goroutine structs from being collected. There is never a time that Gobuf.g's contain the only references to a goroutine: the publishing of the goroutine in allg comes first. Goroutine pointers are also kept in non-GC-visible places like TLS, so I can't see them ever moving. If we did want to start moving data in the GC, we'd need to allocate the goroutine structs from an alternate arena. Using guintptr doesn't make that problem any worse. Note that pollDesc.rg, pollDesc.wg also store g in uintptr form, so they would need to be updated too if g's start moving.

type guintptr uintptr

headTailIndex type #

headTailIndex represents a combined 32-bit head and 32-bit tail of a queue into a single 64-bit value.

type headTailIndex uint64

hex type #

The compiler knows that a print of a value of this type should use printhex instead of printuint (decimal).

type hex uint64

interfacetype type #

type interfacetype abi.InterfaceType

itab type #

type itab abi.ITab

itimerval type #

type itimerval C.struct_itimerval

jmpbuf type #

type jmpbuf C.struct___jmpbuf

lfstack type #

lfstack is the head of a lock-free stack. The zero value of lfstack is an empty list. This stack is intrusive. Nodes must embed lfnode as the first field. The stack does not keep GC-visible pointers to nodes, so the caller must ensure the nodes are allocated outside the Go heap.

type lfstack uint64

libFunc type #

type libFunc uintptr

libcFunc type #

type libcFunc uintptr

limiterEventStamp type #

limiterEventStamp is a nanotime timestamp packed with a limiterEventType.

type limiterEventStamp uint64

limiterEventType type #

limiterEventType indicates the type of an event occurring on some P. These events represent the full set of events that the GC CPU limiter tracks to execute its function. This type may use no more than limiterEventBits bits of information.

type limiterEventType uint8

lockRank type #

type lockRank int

loggerType type #

Prior to Android-L, logging was done through writes to /dev/log files implemented in kernel ring buffers. In Android-L, those /dev/log files are no longer accessible and logging is done through a centralized user-mode logger, logd. https://android.googlesource.com/platform/system/core/+/refs/tags/android-6.0.1_r78/liblog/logd_write.c

type loggerType int32

mSpanState type #

An mspan representing actual memory has state mSpanInUse, mSpanManual, or mSpanFree. Transitions between these states are constrained as follows: - A span may transition from free to in-use or manual during any GC phase. - During sweeping (gcphase == _GCoff), a span may transition from in-use to free (as a result of sweeping) or manual to free (as a result of stacks being freed). - During GC (gcphase != _GCoff), a span *must not* transition from manual or in-use to free. Because concurrent GC may read a pointer and then look up its span, the span state must be monotonic. Setting mspan.state to mSpanInUse or mSpanManual must be done atomically and only after all other span fields are valid. Likewise, if inspecting a span is contingent on it being mSpanInUse, the state should be loaded atomically and checked before depending on other fields. This allows the garbage collector to safely deal with potentially invalid pointers, since resolving such pointers may race with a span being allocated.

type mSpanState uint8

machMsgTypeNumber type #

type machMsgTypeNumber uint32

machMsgTypeNumber type #

type machMsgTypeNumber uint32

machPort type #

type machPort uint32

machPort type #

type machPort uint32

machVMAddress type #

type machVMAddress uint64

machVMAddress type #

type machVMAddress uint64

machVMMapRead type #

type machVMMapRead uint32

machVMMapRead type #

type machVMMapRead uint32

machVMRegionFlavour type #

type machVMRegionFlavour int32

machVMRegionFlavour type #

type machVMRegionFlavour int32

machVMRegionInfo type #

type machVMRegionInfo *int32

machVMRegionInfo type #

type machVMRegionInfo *int32

machVMSize type #

type machVMSize uint64

machVMSize type #

type machVMSize uint64

maptype type #

type maptype abi.SwissMapType

maptype type #

type maptype abi.OldMapType

memHdrPtr type #

type memHdrPtr uintptr

metricKind type #

metricKind is a runtime copy of runtime/metrics.ValueKind and must be kept structurally identical to that type.

type metricKind int

metricReader type #

type metricReader func() uint64

muintptr type #

muintptr is a *m that is not tracked by the garbage collector. Because we do free Ms, there are some additional constraints on muintptrs: 1. Never hold an muintptr locally across a safe point. 2. Any muintptr in the heap must be owned by the M itself so it can ensure it is not in use when the last true *m is released.

type muintptr uintptr

name type #

type name abi.Name

nameOff type #

type nameOff abi.NameOff

pMask type #

pMask is an atomic bitstring with one bit per P.

type pMask []uint32

pageBits type #

pageBits is a bitmap representing one bit per page in a palloc chunk.

type pageBits [*ast.BinaryExpr]uint64

pallocBits type #

pallocBits is a bitmap that tracks page allocations for at most one palloc chunk. The precise representation is an implementation detail, but for the sake of documentation, 0s are free pages and 1s are allocated pages.

type pallocBits pageBits

pallocSum type #

pallocSum is a packed summary type which packs three numbers: start, max, and end into a single 8-byte value. Each of these values are a summary of a bitmap and are thus counts, each of which may have a maximum value of 2^21 - 1, or all three may be equal to 2^21. The latter case is represented by just setting the 64th bit.

type pallocSum uint64

pinnerBits type #

pinnerBits is the same type as gcBits but has different methods.

type pinnerBits gcBits

plainError type #

plainError represents a runtime error described by a string without the prefix "runtime error: " after invoking errorString.Error(). See Issue #14965.

type plainError string

pollInfo type #

pollInfo is the bits needed by netpollcheckerr, stored atomically, mostly duplicating state that is manipulated under lock in pollDesc. The one exception is the pollEventErr bit, which is maintained only in the pollInfo.

type pollInfo uint32

profAtomic type #

A profAtomic is the atomically-accessed word holding a profIndex.

type profAtomic uint64

profBufReadMode type #

profBufReadMode specifies whether to block when no data is available to read.

type profBufReadMode int

profIndex type #

A profIndex is the packet tag and data counts and flags bits, described above.

type profIndex uint64

pthread type #

type pthread uint32

pthread type #

type pthread uint32

pthread type #

type pthread uintptr

pthread type #

type pthread uintptr

pthread type #

type pthread C.pthread_t

pthread type #

type pthread uintptr

pthread type #

type pthread uintptr

pthread type #

type pthread uintptr

pthread type #

type pthread uintptr

pthread type #

type pthread uintptr

pthread type #

type pthread uintptr

pthread_attr type #

type pthread_attr *byte

pthread_attr type #

type pthread_attr C.pthread_attr_t

pthreadattr type #

type pthreadattr uintptr

pthreadattr type #

type pthreadattr uintptr

pthreadattr type #

type pthreadattr uintptr

pthreadattr type #

type pthreadattr uintptr

pthreadattr type #

type pthreadattr uintptr

pthreadattr type #

type pthreadattr uintptr

pthreadcond type #

type pthreadcond uintptr

pthreadcond type #

type pthreadcond uintptr

pthreadcond type #

type pthreadcond uintptr

pthreadcond type #

type pthreadcond uintptr

pthreadcond type #

type pthreadcond uintptr

pthreadcond type #

type pthreadcond uintptr

pthreadcondattr type #

type pthreadcondattr uintptr

pthreadcondattr type #

type pthreadcondattr uintptr

pthreadcondattr type #

type pthreadcondattr uintptr

pthreadcondattr type #

type pthreadcondattr uintptr

pthreadcondattr type #

type pthreadcondattr uintptr

pthreadcondattr type #

type pthreadcondattr uintptr

pthreadkey type #

type pthreadkey uint64

pthreadmutex type #

type pthreadmutex uintptr

pthreadmutex type #

type pthreadmutex uintptr

pthreadmutex type #

type pthreadmutex uintptr

pthreadmutex type #

type pthreadmutex uintptr

pthreadmutex type #

type pthreadmutex uintptr

pthreadmutex type #

type pthreadmutex uintptr

pthreadmutexattr type #

type pthreadmutexattr uintptr

pthreadmutexattr type #

type pthreadmutexattr uintptr

pthreadmutexattr type #

type pthreadmutexattr uintptr

pthreadmutexattr type #

type pthreadmutexattr uintptr

pthreadmutexattr type #

type pthreadmutexattr uintptr

pthreadmutexattr type #

type pthreadmutexattr uintptr

ptrtype type #

type ptrtype abi.PtrType

puintptr type #

type puintptr uintptr

rusage type #

type rusage C.struct_rusage

scavChunkFlags type #

scavChunkFlags is a set of bit-flags for the scavenger for each palloc chunk.

type scavChunkFlags uint8

selectDir type #

These values must match ../reflect/value.go:/SelectDir.

type selectDir int

semTable type #

type semTable [semTabSize]struct{...}

semaProfileFlags type #

type semaProfileFlags int

semt type #

type semt int32

semt type #

type semt C.sem_t

sigactiont type #

type sigactiont C.struct_sigaction

sigcontext type #

type sigcontext C.struct_sigcontext

siginfo type #

type siginfo C.siginfo_t

sigset type #

type sigset uint64

sigset type #

type sigset [4]uint64

sigset type #

It's hard to tease out exactly how big a Sigset is, but rt_sigprocmask crashes if we get it wrong, so if binaries are running, this is right.

type sigset [2]uint32

sigset type #

type sigset C.sigset_t

sigset type #

type sigset [2]uint64

sigset type #

type sigset uint32

sigset type #

type sigset [4]uint32

sigset type #

type sigset uint32

size type #

https://github.com/WebAssembly/WASI/blob/a2b96e81c0586125cc4dc79a5be0b78d9a059925/legacy/preview1/docs.md#-size-u32

type size uint32

sliceInterfacePtr type #

The specialized convTx routines need a type descriptor to use when calling mallocgc. We don't need the type to be exact, just to have the correct size, alignment, and pointer-ness. However, when debugging, it'd be nice to have some indication in mallocgc where the types came from, so we use named types here. We then construct interface values of these types, and then extract the type word to use as needed.

type sliceInterfacePtr []byte

slicetype type #

type slicetype abi.SliceType

spanAllocType type #

spanAllocType represents the type of allocation to make, or the type of allocation to be freed.

type spanAllocType uint8

spanClass type #

A spanClass represents the size class and noscan-ness of a span. Each size class has a noscan spanClass and a scan spanClass. The noscan spanClass contains only noscan objects, which do not contain pointers and thus do not need to be scanned by the garbage collector.

type spanClass uint8

stackt type #

type stackt C.stack_t

statDep type #

statDep is a dependency on a group of statistics that a metric might have.

type statDep uint

statDepSet type #

statDepSet represents a set of statDeps. Under the hood, it's a bitmap.

type statDepSet [1]uint64

stdFunction type #

type stdFunction unsafe.Pointer

stringInterfacePtr type #

The specialized convTx routines need a type descriptor to use when calling mallocgc. We don't need the type to be exact, just to have the correct size, alignment, and pointer-ness. However, when debugging, it'd be nice to have some indication in mallocgc where the types came from, so we use named types here. We then construct interface values of these types, and then extract the type word to use as needed.

type stringInterfacePtr string

structtype type #

type structtype abi.StructType

stwReason type #

stwReason is an enumeration of reasons the world is stopping.

type stwReason uint8

subclockflags type #

type subclockflags uint16

subscriptionUnion type #

type subscriptionUnion [5]uint64

sweepClass type #

sweepClass is a spanClass and one bit to represent whether we're currently sweeping partial or full spans.

type sweepClass uint32

sysMemStat type #

sysMemStat represents a global system statistic that is managed atomically. This type must structurally be a uint64 so that mstats aligns with MemStats.

type sysMemStat uint64

taggedPointer type #

taggedPointer is a pointer with a numeric tag. The size of the numeric tag is GOARCH-dependent, currently at least 10 bits. This should only be used with pointers allocated outside the Go heap.

type taggedPointer uint64

textOff type #

type textOff abi.TextOff

thread type #

type thread int64

thread type #

type thread int32

thread type #

type thread int64

thread type #

type thread int32

thread type #

type thread int64

throwType type #

throwType indicates the current type of ongoing throw, which affects the amount of detail printed to stderr. Higher values include more detail.

type throwType uint32

timespec type #

type timespec C.struct_timespec

timestamp type #

https://github.com/WebAssembly/WASI/blob/a2b96e81c0586125cc4dc79a5be0b78d9a059925/legacy/preview1/docs.md#-timestamp-u64

type timestamp uint64

timestruc type #

type timestruc C.struct_timestruc_t

timeval type #

type timeval C.struct_timeval

tmpBuf type #

type tmpBuf [tmpStringBufSize]byte

traceArg type #

traceArg is a simple wrapper type to help ensure that arguments passed to traces are well-formed.

type traceArg uint64

traceBlockReason type #

traceBlockReason is an enumeration of reasons a goroutine might block. This is the interface the rest of the runtime uses to tell the tracer why a goroutine blocked. The tracer then propagates this information into the trace however it sees fit. Note that traceBlockReasons should not be compared, since reasons that are distinct by name may *not* be distinct by value.

type traceBlockReason uint8

traceEv type #

Event types in the trace, args are given in square brackets. Naming scheme: - Time range event pairs have suffixes "Begin" and "End". - "Start", "Stop", "Create", "Destroy", "Block", "Unblock" are suffixes reserved for scheduling resources. NOTE: If you add an event type, make sure you also update all tables in this file!

type traceEv uint8

traceExperiment type #

traceExperiment is an enumeration of the different kinds of experiments supported for tracing.

type traceExperiment uint8

traceGoStatus type #

traceGoStatus is the status of a goroutine. They correspond directly to the various goroutine statuses.

type traceGoStatus uint8

traceGoStopReason type #

traceGoStopReason is an enumeration of reasons a goroutine might yield. Note that traceGoStopReasons should not be compared, since reasons that are distinct by name may *not* be distinct by value.

type traceGoStopReason uint8

traceProcStatus type #

traceProcStatus is the status of a P. They mostly correspond to the various P statuses.

type traceProcStatus uint8

traceTime type #

traceTime represents a timestamp for the trace.

type traceTime uint64

tstate type #

type tstate C.struct_tstate

typeOff type #

type typeOff abi.TypeOff

ucontext type #

type ucontext C.ucontext_t

uinptr type #

type uinptr _Plink

uint16InterfacePtr type #

The specialized convTx routines need a type descriptor to use when calling mallocgc. We don't need the type to be exact, just to have the correct size, alignment, and pointer-ness. However, when debugging, it'd be nice to have some indication in mallocgc where the types came from, so we use named types here. We then construct interface values of these types, and then extract the type word to use as needed.

type uint16InterfacePtr uint16

uint32InterfacePtr type #

The specialized convTx routines need a type descriptor to use when calling mallocgc. We don't need the type to be exact, just to have the correct size, alignment, and pointer-ness. However, when debugging, it'd be nice to have some indication in mallocgc where the types came from, so we use named types here. We then construct interface values of these types, and then extract the type word to use as needed.

type uint32InterfacePtr uint32

uint64InterfacePtr type #

The specialized convTx routines need a type descriptor to use when calling mallocgc. We don't need the type to be exact, just to have the correct size, alignment, and pointer-ness. However, when debugging, it'd be nice to have some indication in mallocgc where the types came from, so we use named types here. We then construct interface values of these types, and then extract the type word to use as needed.

type uint64InterfacePtr uint64

uintptr32 type #

GOARCH=wasm currently has 64 bits pointers, but the WebAssembly host expects pointers to be 32 bits so we use this type alias to represent pointers in structs and arrays passed as arguments to WASI functions. Note that the use of an integer type prevents the compiler from tracking pointers passed to WASI functions, so we must use KeepAlive to explicitly retain the objects that could otherwise be reclaimed by the GC.

type uintptr32 uint32

uncommontype type #

type uncommontype abi.UncommonType

unwindFlags type #

unwindFlags control the behavior of various unwinders.

type unwindFlags uint8

userdata type #

type userdata uint64

vdsoTimehands type #

type vdsoTimehands C.struct_vdso_timehands

vdsoTimekeep type #

type vdsoTimekeep C.struct_vdso_timekeep

waitReason type #

A waitReason explains why a goroutine has been stopped. See gopark. Do not re-use waitReasons, add new ones.

type waitReason uint8

winlibcall type #

type winlibcall libcall

Interfaces

Error interface #

The Error interface identifies a run time error.

type Error interface {
error
RuntimeError()
}

floaty interface #

type floaty interface {
*ast.BinaryExpr
}

stringer interface #

type stringer interface {
String() string
}

Structs

BlockProfileRecord struct #

BlockProfileRecord describes blocking events originated at a particular call sequence (stack trace).

type BlockProfileRecord struct {
Count int64
Cycles int64
StackRecord
}

Cleanup struct #

Cleanup is a handle to a cleanup call for a specific object.

type Cleanup struct {
id uint64
ptr uintptr
}

Frame struct #

Frame is the information returned by [Frames] for each call frame.

type Frame struct {
PC uintptr
Func *Func
Function string
File string
Line int
startLine int
Entry uintptr
funcInfo funcInfo
}

Frames struct #

Frames may be used to get function/file/line information for a slice of PC values returned by [Callers].

type Frames struct {
callers []uintptr
nextPC uintptr
frames []Frame
frameStore [2]Frame
}

Func struct #

A Func represents a Go function in the running binary.

type Func struct {
opaque struct{...}
}

MemProfileRecord struct #

A MemProfileRecord describes the live objects allocated by a particular call sequence (stack trace).

type MemProfileRecord struct {
AllocBytes int64
FreeBytes int64
AllocObjects int64
FreeObjects int64
Stack0 [32]uintptr
}

MemStats struct #

A MemStats records statistics about the memory allocator.

type MemStats struct {
Alloc uint64
TotalAlloc uint64
Sys uint64
Lookups uint64
Mallocs uint64
Frees uint64
HeapAlloc uint64
HeapSys uint64
HeapIdle uint64
HeapInuse uint64
HeapReleased uint64
HeapObjects uint64
StackInuse uint64
StackSys uint64
MSpanInuse uint64
MSpanSys uint64
MCacheInuse uint64
MCacheSys uint64
BuckHashSys uint64
GCSys uint64
OtherSys uint64
NextGC uint64
LastGC uint64
PauseTotalNs uint64
PauseNs [256]uint64
PauseEnd [256]uint64
NumGC uint32
NumForcedGC uint32
GCCPUFraction float64
EnableGC bool
DebugGC bool
BySize [61]struct{...}
}

PanicNilError struct #

A PanicNilError happens when code calls panic(nil). Before Go 1.21, programs that called panic(nil) observed recover returning nil. Starting in Go 1.21, programs that call panic(nil) observe recover returning a *PanicNilError. Programs can change back to the old behavior by setting GODEBUG=panicnil=1.

type PanicNilError struct {
_ [0]*PanicNilError
}

Pinner struct #

A Pinner is a set of Go objects each pinned to a fixed location in memory. The [Pinner.Pin] method pins one object, while [Pinner.Unpin] unpins all pinned objects. See their comments for more information.

type Pinner struct {
*pinner
}

StackRecord struct #

A StackRecord describes a single execution stack.

type StackRecord struct {
Stack0 [32]uintptr
}

TypeAssertionError struct #

A TypeAssertionError explains a failed type assertion.

type TypeAssertionError struct {
_interface *_type
concrete *_type
asserted *_type
missingMethod string
}

_DISPATCHER_CONTEXT struct #

type _DISPATCHER_CONTEXT struct {
controlPc uint64
imageBase uint64
functionEntry uintptr
establisherFrame uint64
targetIp uint64
context *context
languageHandler uintptr
handlerData uintptr
}

_DISPATCHER_CONTEXT struct #

_DISPATCHER_CONTEXT is not defined on 386.

type _DISPATCHER_CONTEXT struct {

}

_DISPATCHER_CONTEXT struct #

type _DISPATCHER_CONTEXT struct {
controlPc uint64
imageBase uint64
functionEntry uintptr
establisherFrame uint64
targetIp uint64
context *context
languageHandler uintptr
handlerData uintptr
}

_DISPATCHER_CONTEXT struct #

type _DISPATCHER_CONTEXT struct {
controlPc uint32
imageBase uint32
functionEntry uintptr
establisherFrame uint32
targetIp uint32
context *context
languageHandler uintptr
handlerData uintptr
}

_OSVERSIONINFOW struct #

https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/wdm/ns-wdm-_osversioninfow

type _OSVERSIONINFOW struct {
osVersionInfoSize uint32
majorVersion uint32
minorVersion uint32
buildNumber uint32
platformId uint32
csdVersion [128]uint16
}

_defer struct #

A _defer holds an entry on the list of deferred calls. If you add a field here, add code to clear it in deferProcStack. This struct must match the code in cmd/compile/internal/ssagen/ssa.go:deferstruct and cmd/compile/internal/ssagen/ssa.go:(*state).call. Some defers will be allocated on the stack and some on the heap. All defers are logically part of the stack, so write barriers to initialize them are not required. All defers must be manually scanned, and for heap defers, marked.

type _defer struct {
heap bool
rangefunc bool
sp uintptr
pc uintptr
fn func()
link *_defer
head *atomic.Pointer[_defer]
}

_func struct #

Layout of in-memory per-function information prepared by linker See https://golang.org/s/go12symtab. Keep in sync with linker (../cmd/link/internal/ld/pcln.go:/pclntab) and with package debug/gosym and with symtab.go in package runtime.

type _func struct {
sys.NotInHeap
entryOff uint32
nameOff int32
args int32
deferreturn uint32
pcsp uint32
pcfile uint32
pcln uint32
npcdata uint32
cuOffset uint32
startLine int32
funcID abi.FuncID
flag abi.FuncFlag
_ [1]byte
nfuncdata uint8
}

_panic struct #

A _panic holds information about an active panic. A _panic value must only ever live on the stack. The argp and link fields are stack pointers, but don't need special handling during stack growth: because they are pointer-typed and _panic values only live on the stack, regular stack pointer adjustment takes care of them.

type _panic struct {
argp unsafe.Pointer
arg any
link *_panic
startPC uintptr
startSP unsafe.Pointer
sp unsafe.Pointer
lr uintptr
fp unsafe.Pointer
retpc uintptr
deferBitsPtr *uint8
slotsPtr unsafe.Pointer
recovered bool
goexit bool
deferreturn bool
}

_typePair struct #

type _typePair struct {
t1 *_type
t2 *_type
}

abiDesc struct #

abiDesc specifies how to translate from a C frame to a Go frame. This does not specify how to translate back because the result is always a uintptr. If the C ABI is fastcall, this assumes the four fastcall registers were first spilled to the shadow space.

type abiDesc struct {
parts []abiPart
srcStackSize uintptr
dstStackSize uintptr
dstSpill uintptr
dstRegisters int
retOffset uintptr
}

abiPart struct #

abiPart encodes a step in translating between calling ABIs.

type abiPart struct {
kind abiPartKind
srcStackOffset uintptr
dstStackOffset uintptr
dstRegister int
len uintptr
}

activeSweep struct #

activeSweep is a type that captures whether sweeping is done, and whether there are any outstanding sweepers. Every potential sweeper must call begin() before they look for work, and end() after they've finished sweeping.

type activeSweep struct {
state atomic.Uint32
}

addrRange struct #

addrRange represents a region of address space. An addrRange must never span a gap in the address space.

type addrRange struct {
base offAddr
limit offAddr
}

addrRanges struct #

addrRanges is a data structure holding a collection of ranges of address space. The ranges are coalesced eagerly to reduce the number of ranges it holds. The slice backing store for this field is persistentalloc'd and thus there is no way to free it. addrRanges is not thread-safe.

type addrRanges struct {
ranges []addrRange
totalBytes uintptr
sysStat *sysMemStat
}

adjustinfo struct #

type adjustinfo struct {
old stack
delta uintptr
sghi uintptr
}

ancestorInfo struct #

ancestorInfo records details of where a goroutine was started.

type ancestorInfo struct {
pcs []uintptr
goid uint64
gopc uintptr
}

arenaHint struct #

arenaHint is a hint for where to grow the heap arenas. See mheap_.arenaHints.

type arenaHint struct {
_ sys.NotInHeap
addr uintptr
down bool
next *arenaHint
}

argset struct #

argset matches runtime/cgo/linux_syscall.c:argset_t

type argset struct {
args unsafe.Pointer
retval uintptr
}

atomicHeadTailIndex struct #

atomicHeadTailIndex is an atomically-accessed headTailIndex.

type atomicHeadTailIndex struct {
u atomic.Uint64
}

atomicMSpanPointer struct #

atomicMSpanPointer is an atomic.Pointer[mspan]. Can't use generics because it's NotInHeap.

type atomicMSpanPointer struct {
p atomic.UnsafePointer
}

atomicOffAddr struct #

atomicOffAddr is like offAddr, but operations on it are atomic. It also contains operations to be able to store marked addresses to ensure that they're not overridden until they've been seen.

type atomicOffAddr struct {
a atomic.Int64
}

atomicScavChunkData struct #

atomicScavChunkData is an atomic wrapper around a scavChunkData that stores it in its packed form.

type atomicScavChunkData struct {
value atomic.Uint64
}

atomicSpanSetSpinePointer struct #

atomicSpanSetSpinePointer is an atomically-accessed spanSetSpinePointer. It has the same semantics as atomic.UnsafePointer.

type atomicSpanSetSpinePointer struct {
a atomic.UnsafePointer
}

bintime struct #

type bintime struct {
sec int64
frac uint64
}

bintime struct #

type bintime struct {
sec int64
frac uint64
}

bintime struct #

type bintime struct {
sec int32
frac uint64
}

bintime struct #

type bintime struct {
sec int64
frac uint64
}

bintime struct #

type bintime struct {
sec int64
frac uint64
}

bitCursor struct #

A bitCursor is a simple cursor to memory to which we can write a set of bits.

type bitCursor struct {
ptr *byte
n uintptr
}

bitvector struct #

Information from the compiler about the layout of stack frames. Note: this type must agree with reflect.bitVector.

type bitvector struct {
n int32
bytedata *uint8
}

blockRecord struct #

A blockRecord is the bucket data for a bucket of type blockProfile, which is used in blocking and mutex profiles.

type blockRecord struct {
count float64
cycles int64
}

bmap struct #

A bucket for a Go map.

type bmap struct {
tophash [abi.OldMapBucketCount]uint8
}

boundsError struct #

A boundsError represents an indexing or slicing operation gone wrong.

type boundsError struct {
x int64
y int
signed bool
code boundsErrorCode
}

bucket struct #

A bucket holds per-call-stack profiling information. The representation is a bit sleazy, inherited from C. This struct defines the bucket header. It is followed in memory by the stack words and then the actual record data, either a memRecord or a blockRecord. Per-call-stack profiling information. Lookup by hashing call stack into a linked-list hash table. None of the fields in this bucket header are modified after creation, including its next and allnext links. No heap pointers.

type bucket struct {
_ sys.NotInHeap
next *bucket
allnext *bucket
typ bucketType
hash uintptr
size uintptr
nstk uintptr
}

callbackArgs struct #

type callbackArgs struct {
index uintptr
args unsafe.Pointer
result uintptr
retPop uintptr
}

cgoContextArg struct #

cgoContextArg is the type passed to the context function.

type cgoContextArg struct {
context uintptr
}

cgoSymbolizerArg struct #

cgoSymbolizerArg is the type passed to cgoSymbolizer.

type cgoSymbolizerArg struct {
pc uintptr
file *byte
lineno uintptr
funcName *byte
entry uintptr
more uintptr
data uintptr
}

cgoTracebackArg struct #

cgoTracebackArg is the type passed to cgoTraceback.

type cgoTracebackArg struct {
context uintptr
sigContext uintptr
buf *uintptr
max uintptr
}

cgothreadstart struct #

type cgothreadstart struct {
g guintptr
tls *uint64
fn unsafe.Pointer
}

checkmarksMap struct #

A checkmarksMap stores the GC marks in "checkmarks" mode. It is a per-arena bitmap with a bit for every word in the arena. The mark is stored on the bit corresponding to the first word of the marked allocation.

type checkmarksMap struct {
_ sys.NotInHeap
b [heapArenaBytes / goarch.PtrSize / 8]uint8
}

childInfo struct #

type childInfo struct {
argoff uintptr
arglen uintptr
args bitvector
sp *uint8
depth uintptr
}

consistentHeapStats struct #

consistentHeapStats represents a set of various memory statistics whose updates must be viewed completely to get a consistent state of the world. To write updates to memory stats use the acquire and release methods. To obtain a consistent global snapshot of these statistics, use read.

type consistentHeapStats struct {
stats [3]heapStatsDelta
gen atomic.Uint32
noPLock mutex
}

context struct #

type context struct {
contextflags uint32
dr0 uint32
dr1 uint32
dr2 uint32
dr3 uint32
dr6 uint32
dr7 uint32
floatsave floatingsavearea
seggs uint32
segfs uint32
seges uint32
segds uint32
edi uint32
esi uint32
ebx uint32
edx uint32
ecx uint32
eax uint32
ebp uint32
eip uint32
segcs uint32
eflags uint32
esp uint32
segss uint32
extendedregisters [512]uint8
}

context struct #

type context struct {
contextflags uint32
r0 uint32
r1 uint32
r2 uint32
r3 uint32
r4 uint32
r5 uint32
r6 uint32
r7 uint32
r8 uint32
r9 uint32
r10 uint32
r11 uint32
r12 uint32
spr uint32
lrr uint32
pc uint32
cpsr uint32
fpscr uint32
padding uint32
floatNeon [16]neon128
bvr [8]uint32
bcr [8]uint32
wvr [1]uint32
wcr [1]uint32
padding2 [2]uint32
}

context struct #

type context struct {
p1home uint64
p2home uint64
p3home uint64
p4home uint64
p5home uint64
p6home uint64
contextflags uint32
mxcsr uint32
segcs uint16
segds uint16
seges uint16
segfs uint16
seggs uint16
segss uint16
eflags uint32
dr0 uint64
dr1 uint64
dr2 uint64
dr3 uint64
dr6 uint64
dr7 uint64
rax uint64
rcx uint64
rdx uint64
rbx uint64
rsp uint64
rbp uint64
rsi uint64
rdi uint64
r8 uint64
r9 uint64
r10 uint64
r11 uint64
r12 uint64
r13 uint64
r14 uint64
r15 uint64
rip uint64
anon0 [512]byte
vectorregister [26]m128a
vectorcontrol uint64
debugcontrol uint64
lastbranchtorip uint64
lastbranchfromrip uint64
lastexceptiontorip uint64
lastexceptionfromrip uint64
}

context struct #

See https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-arm64_nt_context

type context struct {
contextflags uint32
cpsr uint32
x [31]uint64
xsp uint64
pc uint64
v [32]neon128
fpcr uint32
fpsr uint32
bcr [8]uint32
bvr [8]uint64
wcr [2]uint32
wvr [2]uint64
}

context64 struct #

type context64 struct {
gpr [32]uint64
msr uint64
iar uint64
lr uint64
ctr uint64
cr uint32
xer uint32
fpscr uint32
fpscrx uint32
except [1]uint64
fpr [32]float64
fpeu uint8
fpinfo uint8
fpscr24_31 uint8
pad [1]uint8
excp_type int32
}

coro struct #

A coro represents extra concurrency without extra parallelism, as would be needed for a coroutine implementation. The coro does not represent a specific coroutine, only the ability to do coroutine-style control transfers. It can be thought of as like a special channel that always has a goroutine blocked on it. If another goroutine calls coroswitch(c), the caller becomes the goroutine blocked in c, and the goroutine formerly blocked in c starts running. These switches continue until a call to coroexit(c), which ends the use of the coro by releasing the blocked goroutine in c and exiting the current goroutine. Coros are heap allocated and garbage collected, so that user code can hold a pointer to a coro without causing potential dangling pointer errors.

type coro struct {
gp guintptr
f func(*coro)
mp *m
lockedExt uint32
lockedInt uint32
}

cpuProfile struct #

type cpuProfile struct {
lock mutex
on bool
log *profBuf
extra [1000]uintptr
numExtra int
lostExtra uint64
lostAtomic uint64
}

cpuStats struct #

type cpuStats struct {
GCAssistTime int64
GCDedicatedTime int64
GCIdleTime int64
GCPauseTime int64
GCTotalTime int64
ScavengeAssistTime int64
ScavengeBgTime int64
ScavengeTotalTime int64
IdleTime int64
UserTime int64
TotalTime int64
}

cpuStatsAggregate struct #

cpuStatsAggregate represents CPU stats obtained from the runtime acquired together to avoid skew and inconsistencies.

type cpuStatsAggregate struct {
cpuStats
}

dbgVar struct #

type dbgVar struct {
name string
value *int32
atomic *atomic.Int32
def int32
}

debugCallWrapArgs struct #

type debugCallWrapArgs struct {
dispatch uintptr
callingG *g
}

debugLogBuf struct #

type debugLogBuf struct {
_ sys.NotInHeap
b [debugLogBytes]byte
}

debugLogReader struct #

type debugLogReader struct {
data *debugLogBuf
begin uint64
end uint64
tick uint64
nano uint64
}

debugLogWriter struct #

A debugLogWriter is a ring buffer of binary debug log records. A log record consists of a 2-byte framing header and a sequence of fields. The framing header gives the size of the record as a little endian 16-bit value. Each field starts with a byte indicating its type, followed by type-specific data. If the size in the framing header is 0, it's a sync record consisting of two little endian 64-bit values giving a new time base. Because this is a ring buffer, new records will eventually overwrite old records. Hence, it maintains a reader that consumes the log as it gets overwritten. That reader state is where an actual log reader would start.

type debugLogWriter struct {
_ sys.NotInHeap
write uint64
data debugLogBuf
tick uint64
nano uint64
r debugLogReader
buf [10]byte
}

dlogPerM struct #

type dlogPerM struct {

}

dlogPerM struct #

dlogPerM is the per-M debug log data. This is embedded in the m struct.

type dlogPerM struct {
dlogCache *dloggerImpl
}

dloggerFake struct #

A dloggerFake is a no-op implementation of dlogger.

type dloggerFake struct {

}

dloggerImpl struct #

A dloggerImpl writes to the debug log. To obtain a dloggerImpl, call dlog(). When done with the dloggerImpl, call end().

type dloggerImpl struct {
_ sys.NotInHeap
w debugLogWriter
allLink *dloggerImpl
owned atomic.Uint32
}

eface struct #

type eface struct {
_type *_type
data unsafe.Pointer
}

elfDyn struct #

type elfDyn struct {
d_tag int64
d_val uint64
}

elfDyn struct #

type elfDyn struct {
d_tag int32
d_val uint32
}

elfEhdr struct #

type elfEhdr struct {
e_ident [_EI_NIDENT]byte
e_type uint16
e_machine uint16
e_version uint32
e_entry uint32
e_phoff uint32
e_shoff uint32
e_flags uint32
e_ehsize uint16
e_phentsize uint16
e_phnum uint16
e_shentsize uint16
e_shnum uint16
e_shstrndx uint16
}

elfEhdr struct #

type elfEhdr struct {
e_ident [_EI_NIDENT]byte
e_type uint16
e_machine uint16
e_version uint32
e_entry uint64
e_phoff uint64
e_shoff uint64
e_flags uint32
e_ehsize uint16
e_phentsize uint16
e_phnum uint16
e_shentsize uint16
e_shnum uint16
e_shstrndx uint16
}

elfPhdr struct #

type elfPhdr struct {
p_type uint32
p_offset uint32
p_vaddr uint32
p_paddr uint32
p_filesz uint32
p_memsz uint32
p_flags uint32
p_align uint32
}

elfPhdr struct #

type elfPhdr struct {
p_type uint32
p_flags uint32
p_offset uint64
p_vaddr uint64
p_paddr uint64
p_filesz uint64
p_memsz uint64
p_align uint64
}

elfShdr struct #

type elfShdr struct {
sh_name uint32
sh_type uint32
sh_flags uint64
sh_addr uint64
sh_offset uint64
sh_size uint64
sh_link uint32
sh_info uint32
sh_addralign uint64
sh_entsize uint64
}

elfShdr struct #

type elfShdr struct {
sh_name uint32
sh_type uint32
sh_flags uint32
sh_addr uint32
sh_offset uint32
sh_size uint32
sh_link uint32
sh_info uint32
sh_addralign uint32
sh_entsize uint32
}

elfSym struct #

type elfSym struct {
st_name uint32
st_value uint32
st_size uint32
st_info byte
st_other byte
st_shndx uint16
}

elfSym struct #

type elfSym struct {
st_name uint32
st_info byte
st_other byte
st_shndx uint16
st_value uint64
st_size uint64
}

elfVerdaux struct #

type elfVerdaux struct {
vda_name uint32
vda_next uint32
}

elfVerdaux struct #

type elfVerdaux struct {
vda_name uint32
vda_next uint32
}

elfVerdef struct #

type elfVerdef struct {
vd_version uint16
vd_flags uint16
vd_ndx uint16
vd_cnt uint16
vd_hash uint32
vd_aux uint32
vd_next uint32
}

elfVerdef struct #

type elfVerdef struct {
vd_version uint16
vd_flags uint16
vd_ndx uint16
vd_cnt uint16
vd_hash uint32
vd_aux uint32
vd_next uint32
}

errorAddressString struct #

type errorAddressString struct {
msg string
addr uintptr
}

evacDst struct #

evacDst is an evacuation destination.

type evacDst struct {
b *bmap
i int
k unsafe.Pointer
e unsafe.Pointer
}

event struct #

type event struct {
gp *g
returned bool
}

event struct #

The go:wasmimport directive currently does not accept values of type uint16 in arguments or returns of the function signature. Most WASI imports return an errno value, which we have to define as uint32 because of that limitation. However, the WASI errno type is intended to be a 16 bits integer, and in the event struct the error field should be of type errno. If we used the errno type for the error field it would result in a mismatching field alignment and struct size because errno is declared as a 32 bits type, so we declare the error field as a plain uint16.

type event struct {
_ structs.HostLayout
userdata userdata
error uint16
typ eventtype
fdReadwrite eventFdReadwrite
}

eventFdReadwrite struct #

type eventFdReadwrite struct {
_ structs.HostLayout
nbytes filesize
flags eventrwflags
}

exceptionpointers struct #

type exceptionpointers struct {
record *exceptionrecord
context *context
}

exceptionrecord struct #

type exceptionrecord struct {
exceptioncode uint32
exceptionflags uint32
exceptionrecord *exceptionrecord
exceptionaddress uintptr
numberparameters uint32
exceptioninformation [15]uintptr
}

exceptionstate32 struct #

type exceptionstate32 struct {
trapno uint16
cpu uint16
err uint32
faultvaddr uint32
}

exceptionstate64 struct #

type exceptionstate64 struct {
far uint64
esr uint32
exc uint32
}

exceptionstate64 struct #

type exceptionstate64 struct {
trapno uint16
cpu uint16
err uint32
faultvaddr uint64
}

finalizer struct #

NOTE: Layout known to queuefinalizer.

type finalizer struct {
fn *funcval
arg unsafe.Pointer
nret uintptr
fint *_type
ot *ptrtype
}

finblock struct #

finblock is an array of finalizers to be executed. finblocks are arranged in a linked list for the finalizer queue. finblock is allocated from non-GC'd memory, so any heap pointers must be specially handled. GC currently assumes that the finalizer queue does not grow during marking (but it can shrink).

type finblock struct {
_ sys.NotInHeap
alllink *finblock
next *finblock
cnt uint32
_ int32
fin [(_FinBlockSize - 2*goarch.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
}

findfuncbucket struct #

findfuncbucket is an array of these structures. Each bucket represents 4096 bytes of the text segment. Each subbucket represents 256 bytes of the text segment. To find a function given a pc, locate the bucket and subbucket for that pc. Add together the idx and subbucket value to obtain a function index. Then scan the functab array starting at that index to find the target function. This table uses 20 bytes for every 4096 bytes of code, or ~0.5% overhead.

type findfuncbucket struct {
idx uint32
subbuckets [16]byte
}

fixalloc struct #

fixalloc is a simple free-list allocator for fixed size objects. Malloc uses a FixAlloc wrapped around sysAlloc to manage its mcache and mspan objects. Memory returned by fixalloc.alloc is zeroed by default, but the caller may take responsibility for zeroing allocations by setting the zero flag to false. This is only safe if the memory never contains heap pointers. The caller is responsible for locking around FixAlloc calls. Callers can keep state in the object but the first word is smashed by freeing and reallocating. Consider marking fixalloc'd types not in heap by embedding internal/runtime/sys.NotInHeap.

type fixalloc struct {
size uintptr
first func(arg unsafe.Pointer, p unsafe.Pointer)
arg unsafe.Pointer
list *mlink
chunk uintptr
nchunk uint32
nalloc uint32
inuse uintptr
stat *sysMemStat
zero bool
}

floatingsavearea struct #

type floatingsavearea struct {
controlword uint32
statusword uint32
tagword uint32
erroroffset uint32
errorselector uint32
dataoffset uint32
dataselector uint32
registerarea [80]uint8
cr0npxstate uint32
}

floatstate32 struct #

type floatstate32 struct {
fpu_reserved [2]int32
fpu_fcw fpcontrol
fpu_fsw fpstatus
fpu_ftw uint8
fpu_rsrv1 uint8
fpu_fop uint16
fpu_ip uint32
fpu_cs uint16
fpu_rsrv2 uint16
fpu_dp uint32
fpu_ds uint16
fpu_rsrv3 uint16
fpu_mxcsr uint32
fpu_mxcsrmask uint32
fpu_stmm0 regmmst
fpu_stmm1 regmmst
fpu_stmm2 regmmst
fpu_stmm3 regmmst
fpu_stmm4 regmmst
fpu_stmm5 regmmst
fpu_stmm6 regmmst
fpu_stmm7 regmmst
fpu_xmm0 regxmm
fpu_xmm1 regxmm
fpu_xmm2 regxmm
fpu_xmm3 regxmm
fpu_xmm4 regxmm
fpu_xmm5 regxmm
fpu_xmm6 regxmm
fpu_xmm7 regxmm
fpu_rsrv4 [224]int8
fpu_reserved1 int32
}

floatstate64 struct #

type floatstate64 struct {
fpu_reserved [2]int32
fpu_fcw fpcontrol
fpu_fsw fpstatus
fpu_ftw uint8
fpu_rsrv1 uint8
fpu_fop uint16
fpu_ip uint32
fpu_cs uint16
fpu_rsrv2 uint16
fpu_dp uint32
fpu_ds uint16
fpu_rsrv3 uint16
fpu_mxcsr uint32
fpu_mxcsrmask uint32
fpu_stmm0 regmmst
fpu_stmm1 regmmst
fpu_stmm2 regmmst
fpu_stmm3 regmmst
fpu_stmm4 regmmst
fpu_stmm5 regmmst
fpu_stmm6 regmmst
fpu_stmm7 regmmst
fpu_xmm0 regxmm
fpu_xmm1 regxmm
fpu_xmm2 regxmm
fpu_xmm3 regxmm
fpu_xmm4 regxmm
fpu_xmm5 regxmm
fpu_xmm6 regxmm
fpu_xmm7 regxmm
fpu_xmm8 regxmm
fpu_xmm9 regxmm
fpu_xmm10 regxmm
fpu_xmm11 regxmm
fpu_xmm12 regxmm
fpu_xmm13 regxmm
fpu_xmm14 regxmm
fpu_xmm15 regxmm
fpu_rsrv4 [96]int8
fpu_reserved1 int32
}

forcegcstate struct #

type forcegcstate struct {
lock mutex
g *g
idle atomic.Bool
}

fpcontrol struct #

type fpcontrol struct {
pad_cgo_0 [2]byte
}

fpreg struct #

type fpreg struct {
significand [4]uint16
exponent uint16
}

fpreg1 struct #

type fpreg1 struct {
significand [4]uint16
exponent uint16
}

fpregs struct #

type fpregs struct {
fp_x [64]uint64
fp_fcsr uint64
fp_flags int32
pad int32
}

fpregs struct #

type fpregs struct {
fp_q [64]uint64
fp_sr uint32
fp_cr uint32
fp_flags int32
fp_pad int32
}

fpregset struct #

type fpregset struct {
fp_reg_set [528]byte
}

fpstate struct #

type fpstate struct {
cwd uint16
swd uint16
ftw uint16
fop uint16
rip uint64
rdp uint64
mxcsr uint32
mxcr_mask uint32
_st [8]fpxreg
_xmm [16]xmmreg
padding [24]uint32
}

fpstate struct #

type fpstate struct {
cw uint32
sw uint32
tag uint32
ipoff uint32
cssel uint32
dataoff uint32
datasel uint32
_st [8]fpreg
status uint16
magic uint16
_fxsr_env [6]uint32
mxcsr uint32
reserved uint32
_fxsr_st [8]fpxreg
_xmm [8]xmmreg
padding1 [44]uint32
anon0 [48]byte
}

fpstate1 struct #

type fpstate1 struct {
cwd uint16
swd uint16
ftw uint16
fop uint16
rip uint64
rdp uint64
mxcsr uint32
mxcr_mask uint32
_st [8]fpxreg1
_xmm [16]xmmreg1
padding [24]uint32
}

fpstatus struct #

type fpstatus struct {
pad_cgo_0 [2]byte
}

fpxreg struct #

type fpxreg struct {
significand [4]uint16
exponent uint16
padding [3]uint16
}

fpxreg struct #

type fpxreg struct {
significand [4]uint16
exponent uint16
padding [3]uint16
}

fpxreg1 struct #

type fpxreg1 struct {
significand [4]uint16
exponent uint16
padding [3]uint16
}

funcDescriptor struct #

funcDescriptor is a structure representing a function descriptor A variable with this type is always created in assembler

type funcDescriptor struct {
fn uintptr
toc uintptr
envPointer uintptr
}

funcInfo struct #

type funcInfo struct {
*_func
datap *moduledata
}

funcinl struct #

Pseudo-Func that is returned for PCs that occur in inlined code. A *Func can be either a *_func or a *funcinl, and they are distinguished by the first uintptr. TODO(austin): Can we merge this with inlinedCall?

type funcinl struct {
ones uint32
entry uintptr
name string
file string
line int32
startLine int32
}

functab struct #

type functab struct {
entryoff uint32
funcoff uint32
}

funcval struct #

type funcval struct {
fn uintptr
}

g struct #

type g struct {
stack stack
stackguard0 uintptr
stackguard1 uintptr
_panic *_panic
_defer *_defer
m *m
sched gobuf
syscallsp uintptr
syscallpc uintptr
syscallbp uintptr
stktopsp uintptr
param unsafe.Pointer
atomicstatus atomic.Uint32
stackLock uint32
goid uint64
schedlink guintptr
waitsince int64
waitreason waitReason
preempt bool
preemptStop bool
preemptShrink bool
asyncSafePoint bool
paniconfault bool
gcscandone bool
throwsplit bool
activeStackChans bool
parkingOnChan atomic.Bool
inMarkAssist bool
coroexit bool
raceignore int8
nocgocallback bool
tracking bool
trackingSeq uint8
trackingStamp int64
runnableTime int64
lockedm muintptr
fipsIndicator uint8
sig uint32
writebuf []byte
sigcode0 uintptr
sigcode1 uintptr
sigpc uintptr
parentGoid uint64
gopc uintptr
ancestors *[]ancestorInfo
startpc uintptr
racectx uintptr
waiting *sudog
cgoCtxt []uintptr
labels unsafe.Pointer
timer *timer
sleepWhen int64
selectDone atomic.Uint32
goroutineProfiled goroutineProfileStateHolder
coroarg *coro
syncGroup *synctestGroup
trace gTraceState
gcAssistBytes int64
}

gList struct #

A gList is a list of Gs linked through g.schedlink. A G can only be on one gQueue or gList at a time.

type gList struct {
head guintptr
}

gQueue struct #

A gQueue is a dequeue of Gs linked through g.schedlink. A G can only be on one gQueue or gList at a time.

type gQueue struct {
head guintptr
tail guintptr
}

gTraceState struct #

gTraceState is per-G state for the tracer.

type gTraceState struct {
traceSchedResourceState
}

gcBgMarkWorkerNode struct #

gcBgMarkWorkerNode is an entry in the gcBgMarkWorkerPool. It points to a single gcBgMarkWorker goroutine.

type gcBgMarkWorkerNode struct {
node lfnode
gp guintptr
m muintptr
}

gcBits struct #

gcBits is an alloc/mark bitmap. This is always used as gcBits.x.

type gcBits struct {
_ sys.NotInHeap
x uint8
}

gcBitsArena struct #

type gcBitsArena struct {
_ sys.NotInHeap
free uintptr
next *gcBitsArena
bits [gcBitsChunkBytes - gcBitsHeaderBytes]gcBits
}

gcBitsHeader struct #

type gcBitsHeader struct {
free uintptr
next uintptr
}

gcCPULimiterState struct #

type gcCPULimiterState struct {
lock atomic.Uint32
enabled atomic.Bool
gcEnabled bool
transitioning bool
test bool
bucket struct{...}
overflow uint64
assistTimePool atomic.Int64
idleMarkTimePool atomic.Int64
idleTimePool atomic.Int64
lastUpdate atomic.Int64
lastEnabledCycle atomic.Uint32
nprocs int32
}

gcControllerState struct #

type gcControllerState struct {
gcPercent atomic.Int32
memoryLimit atomic.Int64
heapMinimum uint64
runway atomic.Uint64
consMark float64
lastConsMark [4]float64
gcPercentHeapGoal atomic.Uint64
sweepDistMinTrigger atomic.Uint64
triggered uint64
lastHeapGoal uint64
heapLive atomic.Uint64
heapScan atomic.Uint64
lastHeapScan uint64
lastStackScan atomic.Uint64
maxStackScan atomic.Uint64
globalsScan atomic.Uint64
heapMarked uint64
heapScanWork atomic.Int64
stackScanWork atomic.Int64
globalsScanWork atomic.Int64
bgScanCredit atomic.Int64
assistTime atomic.Int64
dedicatedMarkTime atomic.Int64
fractionalMarkTime atomic.Int64
idleMarkTime atomic.Int64
markStartTime int64
dedicatedMarkWorkersNeeded atomic.Int64
idleMarkWorkers atomic.Uint64
assistWorkPerByte atomic.Float64
assistBytesPerWork atomic.Float64
fractionalUtilizationGoal float64
heapInUse sysMemStat
heapReleased sysMemStat
heapFree sysMemStat
totalAlloc atomic.Uint64
totalFree atomic.Uint64
mappedReady atomic.Uint64
test bool
_ cpu.CacheLinePad
}

gcStatsAggregate struct #

gcStatsAggregate represents various GC stats obtained from the runtime acquired together to avoid skew and inconsistencies.

type gcStatsAggregate struct {
heapScan uint64
stackScan uint64
globalsScan uint64
totalScan uint64
}

gcTrigger struct #

A gcTrigger is a predicate for starting a GC cycle. Specifically, it is an exit condition for the _GCoff phase.

type gcTrigger struct {
kind gcTriggerKind
now int64
n uint32
}

gcWork struct #

A gcWork provides the interface to produce and consume work for the garbage collector. A gcWork can be used on the stack as follows: (preemption must be disabled) gcw := &getg().m.p.ptr().gcw .. call gcw.put() to produce and gcw.tryGet() to consume .. It's important that any use of gcWork during the mark phase prevent the garbage collector from transitioning to mark termination since gcWork may locally hold GC work buffers. This can be done by disabling preemption (systemstack or acquirem).

type gcWork struct {
wbuf1 *workbuf
wbuf2 *workbuf
bytesMarked uint64
heapScanWork int64
flushedWork bool
}

gobuf struct #

type gobuf struct {
sp uintptr
pc uintptr
g guintptr
ctxt unsafe.Pointer
ret uintptr
lr uintptr
bp uintptr
}

godebugInc struct #

A godebugInc provides access to internal/godebug's IncNonDefault function for a given GODEBUG setting. Calls before internal/godebug registers itself are dropped on the floor.

type godebugInc struct {
name string
inc atomic.Pointer[func()]
}

gpregs struct #

type gpregs struct {
gp_ra uint64
gp_sp uint64
gp_gp uint64
gp_tp uint64
gp_t [7]uint64
gp_s [12]uint64
gp_a [8]uint64
gp_sepc uint64
gp_sstatus uint64
}

gpregs struct #

type gpregs struct {
gp_x [30]uint64
gp_lr uint64
gp_sp uint64
gp_elr uint64
gp_spsr uint32
gp_pad int32
}

gsignalStack struct #

gsignalStack is unused on js.

type gsignalStack struct {

}

gsignalStack struct #

gsignalStack is unused on Plan 9.

type gsignalStack struct {

}

gsignalStack struct #

gsignalStack saves the fields of the gsignal stack changed by setGsignalStack.

type gsignalStack struct {
stack stack
stackguard0 uintptr
stackguard1 uintptr
stktopsp uintptr
}

gsignalStack struct #

gsignalStack is unused on Windows.

type gsignalStack struct {

}

hchan struct #

type hchan struct {
qcount uint
dataqsiz uint
buf unsafe.Pointer
elemsize uint16
synctest bool
closed uint32
timer *timer
elemtype *_type
sendx uint
recvx uint
recvq waitq
sendq waitq
lock mutex
}

heapArena struct #

A heapArena stores metadata for a heap arena. heapArenas are stored outside of the Go heap and accessed via the mheap_.arenas index.

type heapArena struct {
_ sys.NotInHeap
spans [pagesPerArena]*mspan
pageInUse [pagesPerArena / 8]uint8
pageMarks [pagesPerArena / 8]uint8
pageSpecials [pagesPerArena / 8]uint8
checkmarks *checkmarksMap
zeroedBase uintptr
}

heapStatsAggregate struct #

heapStatsAggregate represents memory stats obtained from the runtime. This set of stats is grouped together because they depend on each other in some way to make sense of the runtime's current heap memory use. They're also sharded across Ps, so it makes sense to grab them all at once.

type heapStatsAggregate struct {
heapStatsDelta
inObjects uint64
numObjects uint64
totalAllocated uint64
totalFreed uint64
totalAllocs uint64
totalFrees uint64
}

heapStatsDelta struct #

heapStatsDelta contains deltas of various runtime memory statistics that need to be updated together in order for them to be kept consistent with one another.

type heapStatsDelta struct {
committed int64
released int64
inHeap int64
inStacks int64
inWorkBufs int64
inPtrScalarBits int64
tinyAllocCount uint64
largeAlloc uint64
largeAllocCount uint64
smallAllocCount [_NumSizeClasses]uint64
largeFree uint64
largeFreeCount uint64
smallFreeCount [_NumSizeClasses]uint64
}

heldLockInfo struct #

heldLockInfo gives info on a held lock and the rank of that lock

type heldLockInfo struct {
lockAddr uintptr
rank lockRank
}

hiter struct #

A hash iteration structure. If you modify hiter, also change cmd/compile/internal/reflectdata/reflect.go and reflect/value.go to match the layout of this structure.

type hiter struct {
key unsafe.Pointer
elem unsafe.Pointer
t *maptype
h *hmap
buckets unsafe.Pointer
bptr *bmap
overflow *[]*bmap
oldoverflow *[]*bmap
startBucket uintptr
offset uint8
wrapped bool
B uint8
i uint8
bucket uintptr
checkBucket uintptr
clearSeq uint64
}

hmap struct #

A header for a Go map.

type hmap struct {
count int
flags uint8
B uint8
noverflow uint16
hash0 uint32
buckets unsafe.Pointer
oldbuckets unsafe.Pointer
nevacuate uintptr
clearSeq uint64
extra *mapextra
}

iface struct #

type iface struct {
tab *itab
data unsafe.Pointer
}

initTask struct #

An initTask represents the set of initializations that need to be done for a package. Keep in sync with ../../test/noinit.go:initTask

type initTask struct {
state uint32
nfns uint32
}

inlineFrame struct #

An inlineFrame is a position in an inlineUnwinder.

type inlineFrame struct {
pc uintptr
index int32
}

inlineUnwinder struct #

An inlineUnwinder iterates over the stack of inlined calls at a PC by decoding the inline table. The last step of iteration is always the frame of the physical function, so there's always at least one frame. This is typically used as: for u, uf := newInlineUnwinder(...); uf.valid(); uf = u.next(uf) { ... } Implementation note: This is used in contexts that disallow write barriers. Hence, the constructor returns this by value and pointer receiver methods must not mutate pointer fields. Also, we keep the mutable state in a separate struct mostly to keep both structs SSA-able, which generates much better code.

type inlineUnwinder struct {
f funcInfo
inlTree *[1 << 20]inlinedCall
}

inlinedCall struct #

inlinedCall is the encoding of entries in the FUNCDATA_InlTree table.

type inlinedCall struct {
funcID abi.FuncID
_ [3]byte
nameOff int32
parentPc int32
startLine int32
}

iovec struct #

https://github.com/WebAssembly/WASI/blob/a2b96e81c0586125cc4dc79a5be0b78d9a059925/legacy/preview1/docs.md#-iovec-record

type iovec struct {
buf uintptr32
bufLen size
}

itabTableType struct #

Note: change the formula in the mallocgc call in itabAdd if you change these fields.

type itabTableType struct {
size uintptr
count uintptr
entries [itabInitSize]*itab
}

itimerspec struct #

type itimerspec struct {
it_interval timespec
it_value timespec
}

itimerspec struct #

type itimerspec struct {
it_interval timespec
it_value timespec
}

itimerspec struct #

type itimerspec struct {
it_interval timespec
it_value timespec
}

itimerspec struct #

type itimerspec struct {
it_interval timespec
it_value timespec
}

itimerspec struct #

type itimerspec struct {
it_interval timespec
it_value timespec
}

itimerspec struct #

type itimerspec struct {
it_interval timespec
it_value timespec
}

itimerspec struct #

type itimerspec struct {
it_interval timespec
it_value timespec
}

itimerspec struct #

type itimerspec struct {
it_interval timespec
it_value timespec
}

itimerspec struct #

type itimerspec struct {
it_interval timespec
it_value timespec
}

itimerspec struct #

type itimerspec struct {
it_interval timespec
it_value timespec
}

itimerspec struct #

type itimerspec struct {
it_interval timespec
it_value timespec
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

itimerval struct #

type itimerval struct {
it_interval timeval
it_value timeval
}

keventt struct #

type keventt struct {
ident uint32
filter int16
flags uint16
fflags uint32
pad_cgo_0 [4]byte
data int64
udata *byte
pad_cgo_1 [4]byte
ext [4]uint64
}

keventt struct #

type keventt struct {
ident uint32
filter int16
flags uint16
fflags uint32
data int64
udata *byte
}

keventt struct #

type keventt struct {
ident uint64
filter int16
flags uint16
fflags uint32
data int64
udata *byte
ext [4]uint64
}

keventt struct #

type keventt struct {
ident uint32
filter int16
flags uint16
fflags uint32
pad_cgo_0 [4]byte
data int64
udata *byte
pad_cgo_1 [4]byte
}

keventt struct #

type keventt struct {
ident uint64
filter int16
flags uint16
fflags uint32
data int64
udata *byte
}

keventt struct #

type keventt struct {
ident uint64
filter uint32
flags uint32
fflags uint32
pad_cgo_0 [4]byte
data int64
udata *byte
}

keventt struct #

type keventt struct {
ident uint64
filter int16
flags uint16
fflags uint32
data int64
udata *byte
}

keventt struct #

type keventt struct {
ident uint64
filter int16
flags uint16
fflags uint32
data int64
udata *byte
}

keventt struct #

type keventt struct {
ident uint64
filter int16
flags uint16
fflags uint32
data int64
udata *byte
}

keventt struct #

type keventt struct {
ident uint32
filter uint32
flags uint32
fflags uint32
data int64
udata *byte
_ [4]byte
}

keventt struct #

type keventt struct {
ident uint64
filter int16
flags uint16
fflags uint32
data int64
udata *byte
}

keventt struct #

type keventt struct {
ident uint64
filter uint32
flags uint32
fflags uint32
pad_cgo_0 [4]byte
data int64
udata *byte
}

keventt struct #

type keventt struct {
ident uint64
filter int16
flags uint16
fflags uint32
data int64
udata *byte
}

keventt struct #

type keventt struct {
ident uint64
filter int16
flags uint16
fflags uint32
data int64
udata *byte
}

keventt struct #

type keventt struct {
ident uint32
filter int16
flags uint16
fflags uint32
data int64
udata *byte
ext [4]uint64
}

keventt struct #

type keventt struct {
ident uint64
filter int16
flags uint16
fflags uint32
data int64
udata *byte
ext [4]uint64
}

keventt struct #

type keventt struct {
ident uint64
filter int16
flags uint16
fflags uint32
data int64
udata *byte
}

keventt struct #

type keventt struct {
ident uint64
filter int16
flags uint16
fflags uint32
data int64
udata *byte
ext [4]uint64
}

keventt struct #

type keventt struct {
ident uint32
filter uint32
flags uint32
fflags uint32
data int64
udata *byte
}

lfnode struct #

Lock-free stack node. Also known to export_test.go.

type lfnode struct {
next uint64
pushcnt uintptr
}

libcall struct #

type libcall struct {
fn uintptr
n uintptr
args uintptr
r1 uintptr
r2 uintptr
err uintptr
}

limiterEvent struct #

limiterEvent represents tracking state for an event tracked by the GC CPU limiter.

type limiterEvent struct {
stamp atomic.Uint64
}

linearAlloc struct #

linearAlloc is a simple linear allocator that pre-reserves a region of memory and then optionally maps that region into the Ready state as needed. The caller is responsible for locking.

type linearAlloc struct {
next uintptr
mapped uintptr
end uintptr
mapMemory bool
}

linknameIter struct #

linknameIter is the it argument to mapiterinit and mapiternext. Callers of mapiterinit allocate their own iter structure, which has the layout of the pre-Go 1.24 hiter structure, shown here for posterity: type hiter struct { key unsafe.Pointer elem unsafe.Pointer t *maptype h *hmap buckets unsafe.Pointer bptr *bmap overflow *[]*bmap oldoverflow *[]*bmap startBucket uintptr offset uint8 wrapped bool B uint8 i uint8 bucket uintptr checkBucket uintptr } Our structure must maintain compatibility with the old structure. This means: - Our structure must be the same size or smaller than hiter. Otherwise we may write outside the caller's hiter allocation. - Our structure must have the same pointer layout as hiter, so that the GC tracks pointers properly. Based on analysis of the "hall of shame" users of these linknames: - The key and elem fields must be kept up to date with the current key/elem. Some users directly access the key and elem fields rather than calling reflect.mapiterkey/reflect.mapiterelem. - The t field must be non-nil after mapiterinit. gonum.org/v1/gonum uses this to verify the iterator is initialized. - github.com/segmentio/encoding and github.com/RomiChan/protobuf check if h is non-nil, but the code has no effect. Thus the value of h does not matter. See internal/runtime_reflect/map.go.

type linknameIter struct {
key unsafe.Pointer
elem unsafe.Pointer
typ *abi.SwissMapType
it *maps.Iter
}

liveUserArenaChunk struct #

type liveUserArenaChunk struct {
*mspan
x unsafe.Pointer
}

lockRankStruct struct #

lockRankStruct is embedded in mutex, but is empty when static lock ranking is disabled (the default).

type lockRankStruct struct {

}

lockRankStruct struct #

lockRankStruct is embedded in mutex

type lockRankStruct struct {
rank lockRank
pad int
}

lockTimer struct #

lockTimer assists with profiling contention on runtime-internal locks. There are several steps between the time that an M experiences contention and when that contention may be added to the profile. This comes from our constraints: We need to keep the critical section of each lock small, especially when those locks are contended. The reporting code cannot acquire new locks until the M has released all other locks, which means no memory allocations and encourages use of (temporary) M-local storage. The M will have space for storing one call stack that caused contention, and for the magnitude of that contention. It will also have space to store the magnitude of additional contention the M caused, since it only has space to remember one call stack and might encounter several contention events before it releases all of its locks and is thus able to transfer the local buffer into the profile. The M will collect the call stack when it unlocks the contended lock. That minimizes the impact on the critical section of the contended lock, and matches the mutex profile's behavior for contention in sync.Mutex: measured at the Unlock method. The profile for contention on sync.Mutex blames the caller of Unlock for the amount of contention experienced by the callers of Lock which had to wait. When there are several critical sections, this allows identifying which of them is responsible. Matching that behavior for runtime-internal locks will require identifying which Ms are blocked on the mutex. The semaphore-based implementation is ready to allow that, but the futex-based implementation will require a bit more work. Until then, we report contention on runtime-internal locks with a call stack taken from the unlock call (like the rest of the user-space "mutex" profile), but assign it a duration value based on how long the previous lock call took (like the user-space "block" profile). Thus, reporting the call stacks of runtime-internal lock contention is guarded by GODEBUG for now. 
Set GODEBUG=runtimecontentionstacks=1 to enable. TODO(rhysh): plumb through the delay duration, remove GODEBUG, update comment The M will track this by storing a pointer to the lock; lock/unlock pairs for runtime-internal locks are always on the same M. Together, that demands several steps for recording contention. First, when finally acquiring a contended lock, the M decides whether it should plan to profile that event by storing a pointer to the lock in its "to be profiled upon unlock" field. If that field is already set, it uses the relative magnitudes to weight a random choice between itself and the other lock, with the loser's time being added to the "additional contention" field. Otherwise if the M's call stack buffer is occupied, it does the comparison against that sample's magnitude. Second, having unlocked a mutex the M checks to see if it should capture the call stack into its local buffer. Finally, when the M unlocks its last mutex, it transfers the local buffer into the profile. As part of that step, it also transfers any "additional contention" time to the profile. Any lock contention that it experiences while adding samples to the profile will be recorded later as "additional contention" and not include a call stack, to avoid an echo.

type lockTimer struct {
lock *mutex
timeRate int64
timeStart int64
tickStart int64
}

lwpparams struct #

type lwpparams struct {
start_func uintptr
arg unsafe.Pointer
stack uintptr
tid1 unsafe.Pointer
tid2 unsafe.Pointer
}

m struct #

type m struct {
g0 *g
morebuf gobuf
divmod uint32
_ uint32
procid uint64
gsignal *g
goSigStack gsignalStack
sigmask sigset
tls [tlsSlots]uintptr
mstartfn func()
curg *g
caughtsig guintptr
p puintptr
nextp puintptr
oldp puintptr
id int64
mallocing int32
throwing throwType
preemptoff string
locks int32
dying int32
profilehz int32
spinning bool
blocked bool
newSigstack bool
printlock int8
incgo bool
isextra bool
isExtraInC bool
isExtraInSig bool
freeWait atomic.Uint32
needextram bool
g0StackAccurate bool
traceback uint8
ncgocall uint64
ncgo int32
cgoCallersUse atomic.Uint32
cgoCallers *cgoCallers
park note
alllink *m
schedlink muintptr
lockedg guintptr
createstack [32]uintptr
lockedExt uint32
lockedInt uint32
mWaitList mWaitList
mLockProfile mLockProfile
profStack []uintptr
waitunlockf func(*g, unsafe.Pointer) bool
waitlock unsafe.Pointer
waitTraceSkip int
waitTraceBlockReason traceBlockReason
syscalltick uint32
freelink *m
trace mTraceState
libcall libcall
libcallpc uintptr
libcallsp uintptr
libcallg guintptr
winsyscall winlibcall
vdsoSP uintptr
vdsoPC uintptr
preemptGen atomic.Uint32
signalPending atomic.Uint32
pcvalueCache pcvalueCache
dlogPerM
mOS
chacha8 chacha8rand.State
cheaprand uint64
locksHeldLen int
locksHeld [10]heldLockInfo
_ [*ast.BinaryExpr]byte
}

m0Stack struct #

type m0Stack struct {
_ [*ast.BinaryExpr]byte
}

m128a struct #

type m128a struct {
low uint64
high int64
}

mLockProfile struct #

type mLockProfile struct {
waitTime atomic.Int64
stack []uintptr
pending uintptr
cycles int64
cyclesLost int64
haveStack bool
disabled bool
}

mOS struct #

type mOS struct {
profileTimer int32
profileTimerValid atomic.Bool
needPerThreadSyscall atomic.Uint8
vgetrandomState uintptr
waitsema uint32
}

mOS struct #

type mOS struct {
waitsema uintptr
perrno uintptr
}

mOS struct #

type mOS struct {
waitsemacount uint32
notesig *int8
errstr *byte
ignoreHangup bool
}

mOS struct #

type mOS struct {
waitsemacount uint32
}

mOS struct #

type mOS struct {
waitsema uintptr
perrno *int32
ts mts
scratch mscratch
}

mOS struct #

type mOS struct {
waitsema uint32
}

mOS struct #

type mOS struct {
threadLock mutex
thread uintptr
waitsema uintptr
resumesema uintptr
highResTimer uintptr
waitIocpTimer uintptr
waitIocpHandle uintptr
preemptExtLock uint32
}

mOS struct #

type mOS struct {
waitsemacount uint32
}

mOS struct #

type mOS struct {
initialized bool
mutex pthreadmutex
cond pthreadcond
count int
}

mOS struct #

type mOS struct {
waitsema uint32
}

mOS struct #

type mOS struct {

}

mProfCycleHolder struct #

mProfCycleHolder holds the global heap profile cycle number (wrapped at mProfCycleWrap, stored starting at bit 1), and a flag (stored at bit 0) to indicate whether future[cycle] in all buckets has been queued to flush into the active profile.

type mProfCycleHolder struct {
value atomic.Uint32
}

mSpanList struct #

mSpanList heads a linked list of spans.

type mSpanList struct {
_ sys.NotInHeap
first *mspan
last *mspan
}

mSpanStateBox struct #

mSpanStateBox holds an atomic.Uint8 to provide atomic operations on an mSpanState. This is a separate type to disallow accidental comparison or assignment with mSpanState.

type mSpanStateBox struct {
s atomic.Uint8
}

mTraceState struct #

mTraceState is per-M state for the tracer.

type mTraceState struct {
seqlock atomic.Uintptr
buf [2][traceNumExperiments]*traceBuf
link *m
reentered uint32
oldthrowsplit bool
}

mWaitList struct #

type mWaitList struct {

}

mWaitList struct #

mWaitList is part of the M struct, and holds the list of Ms that are waiting for a particular runtime.mutex. When an M is unable to immediately obtain a lock, it adds itself to the list of Ms waiting for the lock. It does that via this struct's next field, forming a singly-linked list with the mutex's key field pointing to the head of the list.

type mWaitList struct {
next muintptr
}

mWaitList struct #

type mWaitList struct {

}

mWaitList struct #

mWaitList is part of the M struct, and holds the list of Ms that are waiting for a particular runtime.mutex. When an M is unable to immediately obtain a lock, it adds itself to the list of Ms waiting for the lock. It does that via this struct's next field, forming a singly-linked list with the mutex's key field pointing to the head of the list.

type mWaitList struct {
next muintptr
}

mWaitList struct #

type mWaitList struct {

}

machTimebaseInfo struct #

type machTimebaseInfo struct {
numer uint32
denom uint32
}

machTimebaseInfo struct #

type machTimebaseInfo struct {
numer uint32
denom uint32
}

mapextra struct #

mapextra holds fields that are not present on all maps.

type mapextra struct {
overflow *[]*bmap
oldoverflow *[]*bmap
nextOverflow *bmap
}

markBits struct #

markBits provides access to the mark bit for an object in the heap. bytep points to the byte holding the mark bit. mask is a byte with a single bit set that can be &ed with *bytep to see if the bit has been set. *m.byte&m.mask != 0 indicates the mark bit is set. index can be used along with span information to generate the address of the object in the heap. We maintain one set of mark bits for allocation and one for marking purposes.

type markBits struct {
bytep *uint8
mask uint8
index uintptr
}

mcache struct #

Per-thread (in Go, per-P) cache for small objects. This includes a small object cache and local allocation stats. No locking needed because it is per-thread (per-P). mcaches are allocated from non-GC'd memory, so any heap pointers must be specially handled.

type mcache struct {
_ sys.NotInHeap
nextSample int64
memProfRate int
scanAlloc uintptr
tiny uintptr
tinyoffset uintptr
tinyAllocs uintptr
alloc [numSpanClasses]*mspan
stackcache [_NumStackOrders]stackfreelist
flushGen atomic.Uint32
}

mcentral struct #

Central list of free objects of a given size.

type mcentral struct {
_ sys.NotInHeap
spanclass spanClass
partial [2]spanSet
full [2]spanSet
}

mcontext struct #

type mcontext struct {
__gregs [17]uint32
__fpu [140]byte
}

mcontext struct #

type mcontext struct {
mc_gpregs gpregs
mc_fpregs fpregs
mc_flags int32
mc_pad int32
mc_spare [8]uint64
}

mcontext struct #

type mcontext struct {
mc_onstack uint32
mc_gs uint32
mc_fs uint32
mc_es uint32
mc_ds uint32
mc_edi uint32
mc_esi uint32
mc_ebp uint32
mc_isp uint32
mc_ebx uint32
mc_edx uint32
mc_ecx uint32
mc_eax uint32
mc_trapno uint32
mc_err uint32
mc_eip uint32
mc_cs uint32
mc_eflags uint32
mc_esp uint32
mc_ss uint32
mc_len uint32
mc_fpformat uint32
mc_ownedfp uint32
mc_flags uint32
mc_fpstate [128]uint32
mc_fsbase uint32
mc_gsbase uint32
mc_xfpustate uint32
mc_xfpustate_len uint32
mc_spare2 [4]uint32
}

mcontext struct #

type mcontext struct {
mc_gpregs gpregs
mc_fpregs fpregs
mc_flags int32
mc_pad int32
mc_spare [8]uint64
}

mcontext struct #

type mcontext struct {
gregs [28]int64
fpregs fpregset
}

mcontext struct #

type mcontext struct {
gregs [23]uint64
fpregs *fpstate
__reserved1 [8]uint64
}

mcontext struct #

type mcontext struct {
mc_onstack uint64
mc_rdi uint64
mc_rsi uint64
mc_rdx uint64
mc_rcx uint64
mc_r8 uint64
mc_r9 uint64
mc_rax uint64
mc_rbx uint64
mc_rbp uint64
mc_r10 uint64
mc_r11 uint64
mc_r12 uint64
mc_r13 uint64
mc_r14 uint64
mc_r15 uint64
mc_trapno uint32
mc_fs uint16
mc_gs uint16
mc_addr uint64
mc_flags uint32
mc_es uint16
mc_ds uint16
mc_err uint64
mc_rip uint64
mc_cs uint64
mc_rflags uint64
mc_rsp uint64
mc_ss uint64
mc_len uint64
mc_fpformat uint64
mc_ownedfp uint64
mc_fpstate [64]uint64
mc_fsbase uint64
mc_gsbase uint64
mc_xfpustate uint64
mc_xfpustate_len uint64
mc_spare [4]uint64
}

mcontext struct #

type mcontext struct {
mc_onstack uint64
mc_rdi uint64
mc_rsi uint64
mc_rdx uint64
mc_rcx uint64
mc_r8 uint64
mc_r9 uint64
mc_rax uint64
mc_rbx uint64
mc_rbp uint64
mc_r10 uint64
mc_r11 uint64
mc_r12 uint64
mc_r13 uint64
mc_r14 uint64
mc_r15 uint64
mc_xflags uint64
mc_trapno uint64
mc_addr uint64
mc_flags uint64
mc_err uint64
mc_rip uint64
mc_cs uint64
mc_rflags uint64
mc_rsp uint64
mc_ss uint64
mc_len uint32
mc_fpformat uint32
mc_ownedfp uint32
mc_reserved uint32
mc_unused [8]uint32
mc_fpregs [256]int32
}

mcontext32 struct #

type mcontext32 struct {
es exceptionstate32
ss regs32
fs floatstate32
}

mcontext64 struct #

type mcontext64 struct {
es exceptionstate64
ss regs64
fs floatstate64
pad_cgo_0 [4]byte
}

mcontext64 struct #

type mcontext64 struct {
es exceptionstate64
ss regs64
ns neonstate64
}

mcontextt struct #

type mcontextt struct {
__gregs [26]uint64
_mc_tlsbase uint64
__fpregs [512]int8
}

mcontextt struct #

type mcontextt struct {
__gregs [35]uint64
__fregs [4160]byte
_ [8]uint64
}

mcontextt struct #

type mcontextt struct {
__gregs [17]uint32
_ [4]byte
__fpu [272]byte
_mc_tlsbase uint32
_ [4]byte
}

mcontextt struct #

type mcontextt struct {
__gregs [19]uint32
__fpregs [644]byte
_mc_tlsbase int32
}

memHdr struct #

type memHdr struct {
next memHdrPtr
size uintptr
}

memRecord struct #

A memRecord is the bucket data for a bucket of type memProfile, part of the memory profile.

type memRecord struct {
active memRecordCycle
future [3]memRecordCycle
}

memRecordCycle struct #

memRecordCycle

type memRecordCycle struct {
allocs uintptr
frees uintptr
alloc_bytes uintptr
free_bytes uintptr
}

memoryBasicInformation struct #

type memoryBasicInformation struct {
baseAddress uintptr
allocationBase uintptr
allocationProtect uint32
regionSize uintptr
state uint32
protect uint32
type_ uint32
}

metricData struct #

type metricData struct {
deps statDepSet
compute func(in *statAggregate, out *metricValue)
}

metricFloat64Histogram struct #

metricFloat64Histogram is a runtime copy of runtime/metrics.Float64Histogram and must be kept structurally identical to that type.

type metricFloat64Histogram struct {
counts []uint64
buckets []float64
}

metricName struct #

type metricName struct {
name string
kind metricKind
}

metricSample struct #

metricSample is a runtime copy of runtime/metrics.Sample and must be kept structurally identical to that type.

type metricSample struct {
name string
value metricValue
}

metricValue struct #

metricValue is a runtime copy of runtime/metrics.Sample and must be kept structurally identical to that type.

type metricValue struct {
kind metricKind
scalar uint64
pointer unsafe.Pointer
}

mheap struct #

Main malloc heap. The heap itself is the "free" and "scav" treaps, but all the other global data is here too. mheap must not be heap-allocated because it contains mSpanLists, which must not be heap-allocated.

type mheap struct {
_ sys.NotInHeap
lock mutex
pages pageAlloc
sweepgen uint32
allspans []*mspan
pagesInUse atomic.Uintptr
pagesSwept atomic.Uint64
pagesSweptBasis atomic.Uint64
sweepHeapLiveBasis uint64
sweepPagesPerByte float64
reclaimIndex atomic.Uint64
reclaimCredit atomic.Uintptr
_ cpu.CacheLinePad
arenas [1 << arenaL1Bits]*[1 << arenaL2Bits]*heapArena
arenasHugePages bool
heapArenaAlloc linearAlloc
arenaHints *arenaHint
arena linearAlloc
allArenas []arenaIdx
sweepArenas []arenaIdx
markArenas []arenaIdx
curArena struct{...}
central [numSpanClasses]struct{...}
spanalloc fixalloc
cachealloc fixalloc
specialfinalizeralloc fixalloc
specialCleanupAlloc fixalloc
specialprofilealloc fixalloc
specialReachableAlloc fixalloc
specialPinCounterAlloc fixalloc
specialWeakHandleAlloc fixalloc
speciallock mutex
arenaHintAlloc fixalloc
userArena struct{...}
cleanupID uint64
unused *specialfinalizer
}

moduledata struct #

moduledata records information about the layout of the executable image. It is written by the linker. Any changes here must be matched changes to the code in cmd/link/internal/ld/symtab.go:symtab. moduledata is stored in statically allocated non-pointer memory; none of the pointers here are visible to the garbage collector.

type moduledata struct {
sys.NotInHeap
pcHeader *pcHeader
funcnametab []byte
cutab []uint32
filetab []byte
pctab []byte
pclntable []byte
ftab []functab
findfunctab uintptr
minpc uintptr
maxpc uintptr
text uintptr
etext uintptr
noptrdata uintptr
enoptrdata uintptr
data uintptr
edata uintptr
bss uintptr
ebss uintptr
noptrbss uintptr
enoptrbss uintptr
covctrs uintptr
ecovctrs uintptr
end uintptr
gcdata uintptr
gcbss uintptr
types uintptr
etypes uintptr
rodata uintptr
gofunc uintptr
textsectmap []textsect
typelinks []int32
itablinks []*itab
ptab []ptabEntry
pluginpath string
pkghashes []modulehash
inittasks []*initTask
modulename string
modulehashes []modulehash
hasmain uint8
bad bool
gcdatamask bitvector
gcbssmask bitvector
typemap map[typeOff]*_type
next *moduledata
}

modulehash struct #

A modulehash is used to compare the ABI of a new module or a package in a new module with the loaded program. For each shared library a module links against, the linker creates an entry in the moduledata.modulehashes slice containing the name of the module, the abi hash seen at link time and a pointer to the runtime abi hash. These are checked in moduledataverify1 below. For each loaded plugin, the pkghashes slice has a modulehash of the newly loaded package that can be used to check the plugin's version of a package against any previously loaded version of the package. This is done in plugin.lastmoduleinit.

type modulehash struct {
modulename string
linktimehash string
runtimehash *string
}

mscratch struct #

type mscratch struct {
v [6]uintptr
}

mspan struct #

type mspan struct {
_ sys.NotInHeap
next *mspan
prev *mspan
list *mSpanList
startAddr uintptr
npages uintptr
manualFreeList gclinkptr
freeindex uint16
nelems uint16
freeIndexForScan uint16
allocCache uint64
allocBits *gcBits
gcmarkBits *gcBits
pinnerBits *gcBits
sweepgen uint32
divMul uint32
allocCount uint16
spanclass spanClass
state mSpanStateBox
needzero uint8
isUserArenaChunk bool
allocCountBeforeCache uint16
elemsize uintptr
limit uintptr
speciallock mutex
specials *special
userArenaChunkFree addrRange
largeType *_type
}

mstats struct #

type mstats struct {
heapStats consistentHeapStats
stacks_sys sysMemStat
mspan_sys sysMemStat
mcache_sys sysMemStat
buckhash_sys sysMemStat
gcMiscSys sysMemStat
other_sys sysMemStat
last_gc_unix uint64
pause_total_ns uint64
pause_ns [256]uint64
pause_end [256]uint64
numgc uint32
numforcedgc uint32
gc_cpu_fraction float64
last_gc_nanotime uint64
lastHeapInUse uint64
enablegc bool
}

mts struct #

type mts struct {
tv_sec int64
tv_nsec int64
}

mutex struct #

Mutual exclusion locks. In the uncontended case, as fast as spin locks (just a few user-level instructions), but on the contention path they sleep in the kernel. A zeroed Mutex is unlocked (no need to initialize each lock). Initialization is helpful for static lock ranking, but not required.

type mutex struct {
lockRankStruct
key uintptr
}

neon128 struct #

type neon128 struct {
low uint64
high int64
}

neon128 struct #

type neon128 struct {
low uint64
high int64
}

neonstate64 struct #

type neonstate64 struct {
v [64]uint64
fpsr uint32
fpcr uint32
}

neverCallThisFunction struct #

type neverCallThisFunction struct {

}

notInHeap struct #

notInHeap is off-heap memory allocated by a lower-level allocator like sysAlloc or persistentAlloc. In general, it's better to use real types which embed internal/runtime/sys.NotInHeap, but this serves as a generic type for situations where that isn't possible (like in the allocators). TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?

type notInHeap struct {
_ sys.NotInHeap
}

notInHeapSlice struct #

A notInHeapSlice is a slice backed by internal/runtime/sys.NotInHeap memory.

type notInHeapSlice struct {
array *notInHeap
len int
cap int
}

note struct #

sleep and wakeup on one-time events. before any calls to notesleep or notewakeup, must call noteclear to initialize the Note. then, exactly one thread can call notesleep and exactly one thread can call notewakeup (once). once notewakeup has been called, the notesleep will return. future notesleep will return immediately. subsequent noteclear must be called only after previous notesleep has returned, e.g. it's disallowed to call noteclear straight after notewakeup. notetsleep is like notesleep but wakes up after a given number of nanoseconds even if the event has not yet happened. if a goroutine uses notetsleep to wake up early, it must wait to call noteclear until it can be sure that no other goroutine is calling notewakeup. notesleep/notetsleep are generally called on g0, notetsleepg is similar to notetsleep but is called on user g.

type note struct {
key uintptr
}

note struct #

sleep and wakeup on one-time events. before any calls to notesleep or notewakeup, must call noteclear to initialize the Note. then, exactly one thread can call notesleep and exactly one thread can call notewakeup (once). once notewakeup has been called, the notesleep will return. future notesleep will return immediately. subsequent noteclear must be called only after previous notesleep has returned, e.g. it's disallowed to call noteclear straight after notewakeup. notetsleep is like notesleep but wakes up after a given number of nanoseconds even if the event has not yet happened. if a goroutine uses notetsleep to wake up early, it must wait to call noteclear until it can be sure that no other goroutine is calling notewakeup. notesleep/notetsleep are generally called on g0, notetsleepg is similar to notetsleep but is called on user g.

type note struct {
status int32
gp *g
deadline int64
allprev *note
allnext *note
}

noteData struct #

type noteData struct {
s [_ERRMAX]byte
n int
}

noteQueue struct #

type noteQueue struct {
lock mutex
data [qsize]noteData
ri int
wi int
full bool
}

notifyList struct #

notifyList is a ticket-based notification list used to implement sync.Cond. It must be kept in sync with the sync package.

type notifyList struct {
wait atomic.Uint32
notify uint32
lock mutex
head *sudog
tail *sudog
}

offAddr struct #

offAddr represents an address in a contiguous view of the address space on systems where the address space is segmented. On other systems, it's just a normal address.

type offAddr struct {
a uintptr
}

overlapped struct #

type overlapped struct {
internal uintptr
internalhigh uintptr
anon0 [8]byte
hevent *byte
}

overlappedEntry struct #

overlappedEntry contains the information returned by a call to GetQueuedCompletionStatusEx. https://learn.microsoft.com/en-us/windows/win32/api/minwinbase/ns-minwinbase-overlapped_entry

type overlappedEntry struct {
key uintptr
ov *overlapped
internal uintptr
qty uint32
}

p struct #

type p struct {
id int32
status uint32
link puintptr
schedtick uint32
syscalltick uint32
sysmontick sysmontick
m muintptr
mcache *mcache
pcache pageCache
raceprocctx uintptr
deferpool []*_defer
deferpoolbuf [32]*_defer
goidcache uint64
goidcacheend uint64
runqhead uint32
runqtail uint32
runq [256]guintptr
runnext guintptr
gFree struct{...}
sudogcache []*sudog
sudogbuf [128]*sudog
mspancache struct{...}
pinnerCache *pinner
trace pTraceState
palloc persistentAlloc
gcAssistTime int64
gcFractionalMarkTime int64
limiterEvent limiterEvent
gcMarkWorkerMode gcMarkWorkerMode
gcMarkWorkerStartTime int64
gcw gcWork
wbBuf wbBuf
runSafePointFn uint32
statsSeq atomic.Uint32
timers timers
maxStackScanDelta int64
scannedStackSize uint64
scannedStacks uint64
preempt bool
gcStopTime int64
}

pTraceState struct #

pTraceState is per-P state for the tracer.

type pTraceState struct {
traceSchedResourceState
mSyscallID int64
maySweep bool
inSweep bool
swept uintptr
reclaimed uintptr
}

pageAlloc struct #

type pageAlloc struct {
summary [summaryLevels][]pallocSum
chunks [1 << pallocChunksL1Bits]*[1 << pallocChunksL2Bits]pallocData
searchAddr offAddr
start chunkIdx
end chunkIdx
inUse addrRanges
scav struct{...}
mheapLock *mutex
sysStat *sysMemStat
summaryMappedReady uintptr
chunkHugePages bool
test bool
}

pageCache struct #

pageCache represents a per-p cache of pages the allocator can allocate from without a lock. More specifically, it represents a pageCachePages*pageSize chunk of memory with 0 or more free pages in it.

type pageCache struct {
base uintptr
cache uint64
scav uint64
}

pallocData struct #

pallocData encapsulates pallocBits and a bitmap for whether or not a given page is scavenged in a single structure. It's effectively a pallocBits with additional functionality. Update the comment on (*pageAlloc).chunks should this structure change.

type pallocData struct {
pallocBits
scavenged pageBits
}

pcHeader struct #

pcHeader holds data used by the pclntab lookups.

type pcHeader struct {
magic uint32
pad1 uint8
pad2 uint8
minLC uint8
ptrSize uint8
nfunc int
nfiles uint
textStart uintptr
funcnameOffset uintptr
cuOffset uintptr
filetabOffset uintptr
pctabOffset uintptr
pclnOffset uintptr
}

pcvalueCache struct #

type pcvalueCache struct {
entries [2][8]pcvalueCacheEnt
inUse int
}

pcvalueCacheEnt struct #

type pcvalueCacheEnt struct {
targetpc uintptr
off uint32
val int32
valPC uintptr
}

perThreadSyscallArgs struct #

perThreadSyscallArgs contains the system call number, arguments, and expected return values for a system call to be executed on all threads.

type perThreadSyscallArgs struct {
trap uintptr
a1 uintptr
a2 uintptr
a3 uintptr
a4 uintptr
a5 uintptr
a6 uintptr
r1 uintptr
r2 uintptr
}

persistentAlloc struct #

type persistentAlloc struct {
base *notInHeap
off uintptr
}

piController struct #

type piController struct {
kp float64
ti float64
tt float64
min float64
max float64
errIntegral float64
errOverflow bool
inputOverflow bool
}

pinState struct #

type pinState struct {
bytep *uint8
byteVal uint8
mask uint8
}

pinner struct #

type pinner struct {
refs []unsafe.Pointer
refStore [pinnerRefStoreSize]unsafe.Pointer
}

pollCache struct #

type pollCache struct {
lock mutex
first *pollDesc
}

pollDesc struct #

Network poller descriptor. No heap pointers.

type pollDesc struct {
_ sys.NotInHeap
link *pollDesc
fd uintptr
fdseq atomic.Uintptr
atomicInfo atomic.Uint32
rg atomic.Uintptr
wg atomic.Uintptr
lock mutex
closing bool
rrun bool
wrun bool
user uint32
rseq uintptr
rt timer
rd int64
wseq uintptr
wt timer
wd int64
self *pollDesc
}

pollOperation struct #

pollOperation must be the same as beginning of internal/poll.operation. Keep these in sync.

type pollOperation struct {
_ overlapped
pd *pollDesc
mode int32
}

pollfd struct #

pollfd represents the poll structure for AIX operating system.

type pollfd struct {
fd int32
events int16
revents int16
}

portevent struct #

type portevent struct {
portev_events int32
portev_source uint16
portev_pad uint16
portev_object uint64
portev_user *byte
}

profBuf struct #

A profBuf is a lock-free buffer for profiling events, safe for concurrent use by one reader and one writer. The writer may be a signal handler running without a user g. The reader is assumed to be a user g. Each logged event corresponds to a fixed size header, a list of uintptrs (typically a stack), and exactly one unsafe.Pointer tag. The header and uintptrs are stored in the circular buffer data and the tag is stored in a circular buffer tags, running in parallel. In the circular buffer data, each event takes 2+hdrsize+len(stk) words: the value 2+hdrsize+len(stk), then the time of the event, then hdrsize words giving the fixed-size header, and then len(stk) words for the stack. The current effective offsets into the tags and data circular buffers for reading and writing are stored in the high 30 and low 32 bits of r and w. The bottom bits of the high 32 are additional flag bits in w, unused in r. "Effective" offsets means the total number of reads or writes, mod 2^length. The offset in the buffer is the effective offset mod the length of the buffer. To make wraparound mod 2^length match wraparound mod length of the buffer, the length of the buffer must be a power of two. If the reader catches up to the writer, a flag passed to read controls whether the read blocks until more data is available. A read returns a pointer to the buffer data itself; the caller is assumed to be done with that data at the next read. The read offset rNext tracks the next offset to be returned by read. By definition, r ≤ rNext ≤ w (before wraparound), and rNext is only used by the reader, so it can be accessed without atomics. If the writer gets ahead of the reader, so that the buffer fills, future writes are discarded and replaced in the output stream by an overflow entry, which has size 2+hdrsize+1, time set to the time of the first discarded write, a header of all zeroed words, and a "stack" containing one word, the number of discarded writes. 
Between the time the buffer fills and the buffer becomes empty enough to hold more data, the overflow entry is stored as a pending overflow entry in the fields overflow and overflowTime. The pending overflow entry can be turned into a real record by either the writer or the reader. If the writer is called to write a new record and finds that the output buffer has room for both the pending overflow entry and the new record, the writer emits the pending overflow entry and the new record into the buffer. If the reader is called to read data and finds that the output buffer is empty but that there is a pending overflow entry, the reader will return a synthesized record for the pending overflow entry. Only the writer can create or add to a pending overflow entry, but either the reader or the writer can clear the pending overflow entry. A pending overflow entry is indicated by the low 32 bits of 'overflow' holding the number of discarded writes, and overflowTime holding the time of the first discarded write. The high 32 bits of 'overflow' increment each time the low 32 bits transition from zero to non-zero or vice versa. This sequence number avoids ABA problems in the use of compare-and-swap to coordinate between reader and writer. The overflowTime is only written when the low 32 bits of overflow are zero, that is, only when there is no pending overflow entry, in preparation for creating a new one. The reader can therefore fetch and clear the entry atomically using for { overflow = load(&b.overflow) if uint32(overflow) == 0 { // no pending entry break } time = load(&b.overflowTime) if cas(&b.overflow, overflow, ((overflow>>32)+1)<<32) { // pending entry cleared break } } if uint32(overflow) > 0 { emit entry for uint32(overflow), time }

type profBuf struct {
r profAtomic
w profAtomic
overflow atomic.Uint64
overflowTime atomic.Uint64
eof atomic.Uint32
hdrsize uintptr
data []uint64
tags []unsafe.Pointer
rNext profIndex
overflowBuf []uint64
wait note
}

ptabEntry struct #

A ptabEntry is generated by the compiler for each exported function and global variable in the main package of a plugin. It is used to initialize the plugin module's symbol map.

type ptabEntry struct {
name nameOff
typ typeOff
}

pthreadattr struct #

type pthreadattr struct {
__pthread_attrp *byte
}

pthreadattr struct #

type pthreadattr struct {
X__sig int64
X__opaque [56]int8
}

pthreadattr struct #

type pthreadattr struct {
X__sig int64
X__opaque [56]int8
}

pthreadcond struct #

type pthreadcond struct {
X__sig int64
X__opaque [40]int8
}

pthreadcond struct #

type pthreadcond struct {
X__sig int64
X__opaque [40]int8
}

pthreadcondattr struct #

type pthreadcondattr struct {
X__sig int64
X__opaque [8]int8
}

pthreadcondattr struct #

type pthreadcondattr struct {
X__sig int64
X__opaque [8]int8
}

pthreadmutex struct #

type pthreadmutex struct {
X__sig int64
X__opaque [56]int8
}

pthreadmutex struct #

type pthreadmutex struct {
X__sig int64
X__opaque [56]int8
}

pthreadmutexattr struct #

type pthreadmutexattr struct {
X__sig int64
X__opaque [8]int8
}

pthreadmutexattr struct #

type pthreadmutexattr struct {
X__sig int64
X__opaque [8]int8
}

ptregs struct #

type ptregs struct {
gpr [32]uint64
nip uint64
msr uint64
orig_gpr3 uint64
ctr uint64
link uint64
xer uint64
ccr uint64
softe uint64
trap uint64
dar uint64
dsisr uint64
result uint64
}

ptregs struct #

type ptregs struct {
gpr [32]uint64
nip uint64
msr uint64
orig_gpr3 uint64
ctr uint64
link uint64
xer uint64
ccr uint64
softe uint64
trap uint64
dar uint64
dsisr uint64
result uint64
}

randomEnum struct #

type randomEnum struct {
i uint32
count uint32
pos uint32
inc uint32
}

randomOrder struct #

randomOrder/randomEnum are helper types for randomized work stealing. They allow enumerating all Ps in different pseudo-random orders without repetitions. The algorithm is based on the fact that if we have X such that X and GOMAXPROCS are coprime, then a sequence of (i + X) % GOMAXPROCS gives the required enumeration.

type randomOrder struct {
count uint32
coprimes []uint32
}

reflectMethodValue struct #

reflectMethodValue is a partial duplicate of reflect.makeFuncImpl and reflect.methodValue.

type reflectMethodValue struct {
fn uintptr
stack *bitvector
argLen uintptr
}

regmmst struct #

type regmmst struct {
mmst_reg [10]int8
mmst_rsrv [6]int8
}

regs32 struct #

type regs32 struct {
eax uint32
ebx uint32
ecx uint32
edx uint32
edi uint32
esi uint32
ebp uint32
esp uint32
ss uint32
eflags uint32
eip uint32
cs uint32
ds uint32
es uint32
fs uint32
gs uint32
}

regs64 struct #

type regs64 struct {
rax uint64
rbx uint64
rcx uint64
rdx uint64
rdi uint64
rsi uint64
rbp uint64
rsp uint64
r8 uint64
r9 uint64
r10 uint64
r11 uint64
r12 uint64
r13 uint64
r14 uint64
r15 uint64
rip uint64
rflags uint64
cs uint64
fs uint64
gs uint64
}

regs64 struct #

type regs64 struct {
x [29]uint64
fp uint64
lr uint64
sp uint64
pc uint64
cpsr uint32
__pad uint32
}

regxmm struct #

type regxmm struct {
xmm_reg [16]int8
}

rtprio struct #

type rtprio struct {
_type uint16
prio uint16
}

rtprio struct #

type rtprio struct {
_type uint16
prio uint16
}

rtprio struct #

type rtprio struct {
_type uint16
prio uint16
}

rtprio struct #

type rtprio struct {
_type uint16
prio uint16
}

rtprio struct #

type rtprio struct {
_type uint16
prio uint16
}

rtprio struct #

type rtprio struct {
_type uint16
prio uint16
}

rtype struct #

rtype is a wrapper that allows us to define additional methods.

type rtype struct {
*abi.Type
}

runtimeSelect struct #

A runtimeSelect is a single case passed to rselect. This must match ../reflect/value.go:/runtimeSelect

type runtimeSelect struct {
dir selectDir
typ unsafe.Pointer
ch *hchan
val unsafe.Pointer
}

rwmutex struct #

A rwmutex is a reader/writer mutual exclusion lock. The lock can be held by an arbitrary number of readers or a single writer. This is a variant of sync.RWMutex, for the runtime package. Like mutex, rwmutex blocks the calling M. It does not interact with the goroutine scheduler.

type rwmutex struct {
rLock mutex
readers muintptr
readerPass uint32
wLock mutex
writer muintptr
readerCount atomic.Int32
readerWait atomic.Int32
readRank lockRank
}

savedOpenDeferState struct #

savedOpenDeferState tracks the extra state from _panic that's necessary for deferreturn to pick up where gopanic left off, without needing to unwind the stack.

type savedOpenDeferState struct {
retpc uintptr
deferBitsOffset uintptr
slotsOffset uintptr
}

scase struct #

Select case descriptor. Known to compiler. Changes here must also be made in src/cmd/compile/internal/walk/select.go's scasetype.

type scase struct {
c *hchan
elem unsafe.Pointer
}

scavChunkData struct #

scavChunkData tracks information about a palloc chunk for scavenging. It packs well into 64 bits. The zero value always represents a valid newly-grown chunk.

type scavChunkData struct {
inUse uint16
lastInUse uint16
gen uint32
scavChunkFlags
}

scavengeIndex struct #

scavengeIndex is a structure for efficiently managing which pageAlloc chunks have memory available to scavenge.

type scavengeIndex struct {
chunks []atomicScavChunkData
min atomic.Uintptr
max atomic.Uintptr
minHeapIdx atomic.Uintptr
searchAddrBg atomicOffAddr
searchAddrForce atomicOffAddr
freeHWM offAddr
gen uint32
test bool
}

scavengerState struct #

type scavengerState struct {
lock mutex
g *g
timer *timer
sysmonWake atomic.Uint32
parked bool
printControllerReset bool
targetCPUFraction float64
sleepRatio float64
sleepController piController
controllerCooldown int64
sleepStub func(n int64) int64
scavenge func(n uintptr) (uintptr, int64)
shouldStop func() bool
gomaxprocs func() int32
}

schedt struct #

type schedt struct {
goidgen atomic.Uint64
lastpoll atomic.Int64
pollUntil atomic.Int64
lock mutex
midle muintptr
nmidle int32
nmidlelocked int32
mnext int64
maxmcount int32
nmsys int32
nmfreed int64
ngsys atomic.Int32
pidle puintptr
npidle atomic.Int32
nmspinning atomic.Int32
needspinning atomic.Uint32
runq gQueue
runqsize int32
disable struct{...}
gFree struct{...}
sudoglock mutex
sudogcache *sudog
deferlock mutex
deferpool *_defer
freem *m
gcwaiting atomic.Bool
stopwait int32
stopnote note
sysmonwait atomic.Bool
sysmonnote note
safePointFn func(*p)
safePointWait int32
safePointNote note
profilehz int32
procresizetime int64
totaltime int64
sysmonlock mutex
timeToRun timeHistogram
idleTime atomic.Int64
totalMutexWaitTime atomic.Int64
stwStoppingTimeGC timeHistogram
stwStoppingTimeOther timeHistogram
stwTotalTimeGC timeHistogram
stwTotalTimeOther timeHistogram
totalRuntimeLockWaitTime atomic.Int64
}

semaRoot struct #

A semaRoot holds a balanced tree of sudog with distinct addresses (s.elem). Each of those sudog may in turn point (through s.waitlink) to a list of other sudogs waiting on the same address. The operations on the inner lists of sudogs with the same address are all O(1). The scanning of the top-level semaRoot list is O(log n), where n is the number of distinct addresses with goroutines blocked on them that hash to the given semaRoot. See golang.org/issue/17953 for a program that worked badly before we introduced the second level of list, and BenchmarkSemTable/OneAddrCollision/* for a benchmark that exercises this.

type semaRoot struct {
lock mutex
treap *sudog
nwait atomic.Uint32
}

semt struct #

type semt struct {
sem_count uint32
sem_type uint16
sem_magic uint16
sem_pad1 [3]uint64
sem_pad2 [2]uint64
}

sigTabT struct #

sigTabT is the type of an entry in the global sigtable array. sigtable is inherently system dependent, and appears in OS-specific files, but sigTabT is the same for all Unixy systems. The sigtable array is indexed by a system signal number to get the flags and printable name of each signal.

type sigTabT struct {
flags int32
name string
}

sigTabT struct #

type sigTabT struct {
flags int
name string
}

sigactiont struct #

type sigactiont struct {
sa_sigaction uintptr
sa_mask uint32
sa_flags int32
}

sigactiont struct #

type sigactiont struct {
sa_handler uintptr
sa_flags uint64
sa_restorer uintptr
sa_mask uint64
}

sigactiont struct #

type sigactiont struct {
sa_handler uintptr
sa_flags uint64
sa_mask uint64
sa_restorer uintptr
}

sigactiont struct #

type sigactiont struct {
sa_handler uintptr
sa_flags uint64
sa_restorer uintptr
sa_mask uint64
}

sigactiont struct #

type sigactiont struct {
sa_handler uintptr
sa_flags uint32
sa_restorer uintptr
sa_mask uint64
}

sigactiont struct #

type sigactiont struct {
sa_handler uintptr
sa_flags int32
sa_mask sigset
}

sigactiont struct #

type sigactiont struct {
sa_handler uintptr
sa_flags uint64
sa_mask uint64
sa_restorer uintptr
}

sigactiont struct #

type sigactiont struct {
sa_handler uintptr
sa_flags uint64
sa_restorer uintptr
sa_mask uint64
}

sigactiont struct #

type sigactiont struct {
sa_sigaction uintptr
sa_mask sigset
sa_flags int32
}

sigactiont struct #

type sigactiont struct {
__sigaction_u [8]byte
sa_tramp unsafe.Pointer
sa_mask uint32
sa_flags int32
}

sigactiont struct #

type sigactiont struct {
sa_handler uintptr
sa_flags uint64
sa_restorer uintptr
sa_mask uint64
}

sigactiont struct #

type sigactiont struct {
__sigaction_u [8]byte
sa_tramp unsafe.Pointer
sa_mask uint32
sa_flags int32
}

sigactiont struct #

type sigactiont struct {
sa_flags uint32
sa_handler uintptr
sa_mask [2]uint64
sa_restorer uintptr
}

sigactiont struct #

type sigactiont struct {
sa_handler uintptr
sa_mask sigset
sa_flags int32
pad_cgo_0 [4]byte
}

sigactiont struct #

type sigactiont struct {
sa_handler uintptr
sa_flags uint32
sa_restorer uintptr
sa_mask uint64
}

sigactiont struct #

type sigactiont struct {
sa_sigaction uintptr
sa_flags int32
sa_mask sigset
}

sigactiont struct #

type sigactiont struct {
sa_flags int32
pad_cgo_0 [4]byte
_funcptr [8]byte
sa_mask sigset
}

sigactiont struct #

type sigactiont struct {
sa_flags uint32
sa_handler uintptr
sa_mask [4]uint32
sa_restorer uintptr
}

sigactiont struct #

type sigactiont struct {
sa_handler uintptr
sa_flags uint64
sa_restorer uintptr
sa_mask uint64
}

sigcontext struct #

type sigcontext struct {
sc_regmask uint32
sc_status uint32
sc_pc uint64
sc_regs [32]uint64
sc_fpregs [32]uint64
sc_acx uint32
sc_fpc_csr uint32
sc_fpc_eir uint32
sc_used_math uint32
sc_dsp uint32
sc_mdhi uint64
sc_mdlo uint64
sc_hi1 uint32
sc_lo1 uint32
sc_hi2 uint32
sc_lo2 uint32
sc_hi3 uint32
sc_lo3 uint32
}

sigcontext struct #

type sigcontext struct {
sc_cookie uint64
sc_mask uint64
sc_pc uint64
sc_regs [32]uint64
mullo uint64
mulhi uint64
sc_fpregs [33]uint64
sc_fpused uint64
sc_fpc_eir uint64
_xxx [8]int64
}

sigcontext struct #

type sigcontext struct {
sc_onstack int32
pad_cgo_0 [4]byte
sc_mask sigset
sc_uerror int32
sc_jmpbuf context64
}

sigcontext struct #

type sigcontext struct {
fault_address uint64
regs [31]uint64
sp uint64
pc uint64
pstate uint64
_pad [8]byte
__reserved [4096]byte
}

sigcontext struct #

type sigcontext struct {
__sc_unused int32
sc_mask int32
sc_spsr uint32
sc_r0 uint32
sc_r1 uint32
sc_r2 uint32
sc_r3 uint32
sc_r4 uint32
sc_r5 uint32
sc_r6 uint32
sc_r7 uint32
sc_r8 uint32
sc_r9 uint32
sc_r10 uint32
sc_r11 uint32
sc_r12 uint32
sc_usr_sp uint32
sc_usr_lr uint32
sc_svc_lr uint32
sc_pc uint32
sc_fpused uint32
sc_fpscr uint32
sc_fpreg [32]uint64
}

sigcontext struct #

type sigcontext struct {
r8 uint64
r9 uint64
r10 uint64
r11 uint64
r12 uint64
r13 uint64
r14 uint64
r15 uint64
rdi uint64
rsi uint64
rbp uint64
rbx uint64
rdx uint64
rax uint64
rcx uint64
rsp uint64
rip uint64
eflags uint64
cs uint16
gs uint16
fs uint16
__pad0 uint16
err uint64
trapno uint64
oldmask uint64
cr2 uint64
fpstate *fpstate1
__reserved1 [8]uint64
}

sigcontext struct #

type sigcontext struct {
sc_gs uint32
sc_fs uint32
sc_es uint32
sc_ds uint32
sc_edi uint32
sc_esi uint32
sc_ebp uint32
sc_ebx uint32
sc_edx uint32
sc_ecx uint32
sc_eax uint32
sc_eip uint32
sc_cs uint32
sc_eflags uint32
sc_esp uint32
sc_ss uint32
__sc_unused uint32
sc_mask uint32
sc_trapno uint32
sc_err uint32
sc_fpstate unsafe.Pointer
}

sigcontext struct #

type sigcontext struct {
psw_mask uint64
psw_addr uint64
gregs [16]uint64
aregs [16]uint32
fpc uint32
fpregs [16]uint64
}

sigcontext struct #

type sigcontext struct {
_unused [4]uint64
signal int32
_pad0 int32
handler uint64
oldmask uint64
regs *ptregs
gp_regs [48]uint64
fp_regs [33]float64
v_regs *vreg
vmx_reserve [101]int64
}

sigcontext struct #

type sigcontext struct {
_unused [4]uint64
signal int32
_pad0 int32
handler uint64
oldmask uint64
regs *ptregs
gp_regs [48]uint64
fp_regs [33]float64
v_regs *vreg
vmx_reserve [101]int64
}

sigcontext struct #

type sigcontext struct {
__sc_unused int32
sc_mask int32
sc_sp uintptr
sc_lr uintptr
sc_elr uintptr
sc_spsr uintptr
sc_x [30]uintptr
sc_cookie int64
}

sigcontext struct #

type sigcontext struct {
__sc_unused int32
sc_mask int32
sc_ra uintptr
sc_sp uintptr
sc_gp uintptr
sc_tp uintptr
sc_t [7]uintptr
sc_s [12]uintptr
sc_a [8]uintptr
sc_sepc uintptr
sc_f [32]uintptr
sc_fcsr uintptr
sc_cookie int64
}

sigcontext struct #

type sigcontext struct {
sc_rdi uint64
sc_rsi uint64
sc_rdx uint64
sc_rcx uint64
sc_r8 uint64
sc_r9 uint64
sc_r10 uint64
sc_r11 uint64
sc_r12 uint64
sc_r13 uint64
sc_r14 uint64
sc_r15 uint64
sc_rbp uint64
sc_rbx uint64
sc_rax uint64
sc_gs uint64
sc_fs uint64
sc_es uint64
sc_ds uint64
sc_trapno uint64
sc_err uint64
sc_rip uint64
sc_cs uint64
sc_rflags uint64
sc_rsp uint64
sc_ss uint64
sc_fpstate unsafe.Pointer
__sc_unused int32
sc_mask int32
}

sigcontext struct #

type sigcontext struct {
sc_pc uint64
sc_regs [32]uint64
sc_flags uint32
sc_pad0 [1]uint32
sc_extcontext [0]uint64
}

sigcontext struct #

type sigcontext struct {
sc_regs [32]uint64
sc_fpregs [32]uint64
sc_mdhi uint64
sc_hi1 uint64
sc_hi2 uint64
sc_hi3 uint64
sc_mdlo uint64
sc_lo1 uint64
sc_lo2 uint64
sc_lo3 uint64
sc_pc uint64
sc_fpc_csr uint32
sc_used_math uint32
sc_dsp uint32
sc_reserved uint32
}

sigcontext struct #

type sigcontext struct {
gs uint16
__gsh uint16
fs uint16
__fsh uint16
es uint16
__esh uint16
ds uint16
__dsh uint16
edi uint32
esi uint32
ebp uint32
esp uint32
ebx uint32
edx uint32
ecx uint32
eax uint32
trapno uint32
err uint32
eip uint32
cs uint16
__csh uint16
eflags uint32
esp_at_signal uint32
ss uint16
__ssh uint16
fpstate *fpstate
oldmask uint32
cr2 uint32
}

sigcontext struct #

type sigcontext struct {
trap_no uint32
error_code uint32
oldmask uint32
r0 uint32
r1 uint32
r2 uint32
r3 uint32
r4 uint32
r5 uint32
r6 uint32
r7 uint32
r8 uint32
r9 uint32
r10 uint32
fp uint32
ip uint32
sp uint32
lr uint32
pc uint32
cpsr uint32
fault_address uint32
}

sigcontext struct #

type sigcontext struct {
sc_regs user_regs_struct
sc_fpregs user_fpregs_struct
}

sigcontext struct #

type sigcontext struct {
sc_cookie uint64
sc_mask int32
sc_reg [32]uint64
sc_lr uint64
sc_cr uint64
sc_xer uint64
sc_ctr uint64
sc_pc uint64
sc_ps uint64
sc_vrsave uint64
pad_cgo_0 [8]byte
sc_vsx [64][16]uint8
sc_fpscr uint64
sc_vscr uint64
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
u *ureg
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
u *ureg
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
u *ureg
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigctxt struct #

type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}

sigevent struct #

type sigevent struct {
sigeventFields
_ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte
}

sigevent struct #

type sigevent struct {
sigeventFields
_ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte
}

sigevent struct #

type sigevent struct {
sigeventFields
_ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte
}

sigevent struct #

type sigevent struct {
sigeventFields
_ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte
}

sigevent struct #

type sigevent struct {
sigeventFields
_ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte
}

sigevent struct #

type sigevent struct {
sigeventFields
_ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte
}

sigevent struct #

type sigevent struct {
sigeventFields
_ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte
}

sigevent struct #

type sigevent struct {
sigeventFields
_ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte
}

sigevent struct #

type sigevent struct {
sigeventFields
_ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte
}

sigevent struct #

type sigevent struct {
sigeventFields
_ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte
}

sigevent struct #

type sigevent struct {
sigeventFields
_ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte
}

sigeventFields struct #

type sigeventFields struct {
value uintptr
signo int32
notify int32
sigev_notify_thread_id int32
}

sigeventFields struct #

type sigeventFields struct {
value uintptr
signo int32
notify int32
sigev_notify_thread_id int32
}

sigeventFields struct #

type sigeventFields struct {
value uintptr
signo int32
notify int32
sigev_notify_thread_id int32
}

sigeventFields struct #

type sigeventFields struct {
value uintptr
signo int32
notify int32
sigev_notify_thread_id int32
}

sigeventFields struct #

type sigeventFields struct {
value uintptr
signo int32
notify int32
sigev_notify_thread_id int32
}

sigeventFields struct #

type sigeventFields struct {
value uintptr
signo int32
notify int32
sigev_notify_thread_id int32
}

sigeventFields struct #

type sigeventFields struct {
value uintptr
signo int32
notify int32
sigev_notify_thread_id int32
}

sigeventFields struct #

type sigeventFields struct {
value uintptr
signo int32
notify int32
sigev_notify_thread_id int32
}

sigeventFields struct #

type sigeventFields struct {
value uintptr
signo int32
notify int32
sigev_notify_thread_id int32
}

sigeventFields struct #

type sigeventFields struct {
value uintptr
signo int32
notify int32
sigev_notify_thread_id int32
}

sigeventFields struct #

type sigeventFields struct {
value uintptr
signo int32
notify int32
sigev_notify_thread_id int32
}

siginfo struct #

type siginfo struct {
si_signo int32
si_errno int32
si_code int32
si_pid int32
si_uid uint32
si_status int32
si_addr uintptr
si_band int64
si_value [2]int32
__si_flags int32
__pad [3]int32
}

siginfo struct #

type siginfo struct {
siginfoFields
_ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte
}

siginfo struct #

type siginfo struct {
_signo int32
_code int32
_errno int32
_pad int32
_reason [24]byte
}

siginfo struct #

type siginfo struct {
siginfoFields
_ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte
}

siginfo struct #

type siginfo struct {
si_signo int32
si_errno int32
si_code int32
si_pid int32
si_uid uint32
si_status int32
si_addr uint64
si_value [8]byte
_reason [40]byte
}

siginfo struct #

type siginfo struct {
si_signo int32
si_code int32
si_errno int32
pad_cgo_0 [4]byte
_data [120]byte
}

siginfo struct #

type siginfo struct {
_signo int32
_code int32
_errno int32
_reason uintptr
_reasonx [16]byte
}

siginfo struct #

type siginfo struct {
si_signo int32
si_code int32
si_errno int32
pad_cgo_0 [4]byte
_data [120]byte
}

siginfo struct #

type siginfo struct {
si_signo int32
si_code int32
si_errno int32
_data [116]byte
}

siginfo struct #

type siginfo struct {
siginfoFields
_ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte
}

siginfo struct #

type siginfo struct {
siginfoFields
_ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte
}

siginfo struct #

type siginfo struct {
_signo int32
_code int32
_errno int32
_reason [20]byte
}

siginfo struct #

type siginfo struct {
si_signo int32
si_errno int32
si_code int32
si_pid int32
si_uid uint32
si_status int32
si_addr *byte
si_value [8]byte
si_band int64
__pad [7]uint64
}

siginfo struct #

type siginfo struct {
si_signo int32
si_errno int32
si_code int32
si_pid int32
si_uid uint32
si_status int32
si_addr uintptr
si_value [4]byte
_reason [32]byte
}

siginfo struct #

type siginfo struct {
si_signo int32
si_errno int32
si_code int32
si_pid int32
si_uid uint32
si_status int32
si_addr uint64
si_value [8]byte
si_band int64
__pad [7]uint64
}

siginfo struct #

type siginfo struct {
siginfoFields
_ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte
}

siginfo struct #

type siginfo struct {
siginfoFields
_ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte
}

siginfo struct #

type siginfo struct {
si_signo int32
si_code int32
si_errno int32
pad_cgo_0 [4]byte
_data [120]byte
}

siginfo struct #

type siginfo struct {
si_signo int32
si_code int32
si_errno int32
pad_cgo_0 [4]byte
_data [120]byte
}

siginfo struct #

type siginfo struct {
si_signo int32
si_code int32
si_errno int32
pad_cgo_0 [4]byte
_data [120]byte
}

siginfo struct #

type siginfo struct {
_signo int32
_code int32
_errno int32
_reason uintptr
_reasonx [16]byte
}

siginfo struct #

type siginfo struct {
si_signo int32
si_errno int32
si_code int32
si_pid int32
si_uid uint32
si_status int32
si_addr uint64
si_value [8]byte
_reason [40]byte
}

siginfo struct #

type siginfo struct {
siginfoFields
_ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte
}

siginfo struct #

type siginfo struct {
siginfoFields
_ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte
}

siginfo struct #

type siginfo struct {
siginfoFields
_ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte
}

siginfo struct #

type siginfo struct {
si_signo int32
si_errno int32
si_code int32
si_pid int32
si_uid uint32
si_status int32
si_addr uint64
si_value [8]byte
si_band int64
__spare__ [7]int32
pad_cgo_0 [4]byte
}

siginfo struct #

type siginfo struct {
si_signo int32
si_code int32
si_errno int32
pad_cgo_0 [4]byte
_data [120]byte
}

siginfo struct #

type siginfo struct {
si_signo int32
si_code int32
si_errno int32
si_pad int32
__data [240]byte
}

siginfo struct #

type siginfo struct {
siginfoFields
_ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte
}

siginfo struct #

type siginfo struct {
si_signo int32
si_errno int32
si_code int32
si_pid int32
si_uid uint32
si_status int32
si_addr uintptr
si_value [4]byte
_reason [32]byte
}

siginfo struct #

type siginfo struct {
si_signo int32
si_errno int32
si_code int32
si_pid int32
si_uid uint32
si_status int32
si_addr uint64
si_value [8]byte
_reason [40]byte
}

siginfo struct #

type siginfo struct {
siginfoFields
_ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte
}

siginfoFields struct #

type siginfoFields struct {
si_signo int32
si_errno int32
si_code int32
si_addr uint32
}

siginfoFields struct #

type siginfoFields struct {
si_signo int32
si_code int32
si_errno int32
__pad0 [1]int32
si_addr uint64
}

siginfoFields struct #

type siginfoFields struct {
si_signo int32
si_errno int32
si_code int32
si_addr uint32
}

siginfoFields struct #

type siginfoFields struct {
si_signo int32
si_errno int32
si_code int32
__pad0 [1]int32
si_addr uint64
}

siginfoFields struct #

type siginfoFields struct {
si_signo int32
si_errno int32
si_code int32
si_addr uint64
}

siginfoFields struct #

type siginfoFields struct {
si_signo int32
si_errno int32
si_code int32
si_addr uint64
}

siginfoFields struct #

type siginfoFields struct {
si_signo int32
si_errno int32
si_code int32
si_addr uint64
}

siginfoFields struct #

type siginfoFields struct {
si_signo int32
si_code int32
si_errno int32
si_addr uint32
}

siginfoFields struct #

type siginfoFields struct {
si_signo int32
si_errno int32
si_code int32
si_addr uint64
}

siginfoFields struct #

type siginfoFields struct {
si_signo int32
si_errno int32
si_code int32
si_addr uint64
}

siginfoFields struct #

type siginfoFields struct {
si_signo int32
si_errno int32
si_code int32
si_addr uint64
}

sigset struct #

type sigset struct {
__bits [4]uint32
}

sigset struct #

type sigset struct {
__bits [4]uint32
}

sigset struct #

type sigset struct {

}

sigset struct #

type sigset struct {

}

sigset struct #

type sigset struct {
__bits [4]uint32
}

sigset struct #

type sigset struct {

}

sigset struct #

type sigset struct {
__bits [4]uint32
}

sigset struct #

type sigset struct {
__bits [4]uint32
}

sigset struct #

type sigset struct {
__bits [4]uint32
}

sigset struct #

type sigset struct {
__bits [4]uint32
}

sigset struct #

type sigset struct {
__bits [4]uint32
}

sigset struct #

type sigset struct {
__bits [4]uint32
}

sigset struct #

type sigset struct {
__sigbits [4]uint32
}

sigset struct #

type sigset struct {
__bits [4]uint32
}

slice struct #

type slice struct {
array unsafe.Pointer
len int
cap int
}

sockaddr_un struct #

type sockaddr_un struct {
family uint16
path [108]byte
}

sockaddr_un struct #

type sockaddr_un struct {
family uint16
path [108]byte
}

sockaddr_un struct #

type sockaddr_un struct {
family uint16
path [108]byte
}

sockaddr_un struct #

type sockaddr_un struct {
family uint16
path [108]byte
}

spanSet struct #

A spanSet is a set of *mspans. spanSet is safe for concurrent push and pop operations.

type spanSet struct {
spineLock mutex
spine atomicSpanSetSpinePointer
spineLen atomic.Uintptr
spineCap uintptr
index atomicHeadTailIndex
}

spanSetBlock struct #

type spanSetBlock struct {
lfnode
popped atomic.Uint32
spans [spanSetBlockEntries]atomicMSpanPointer
}

spanSetBlockAlloc struct #

spanSetBlockAlloc represents a concurrent pool of spanSetBlocks.

type spanSetBlockAlloc struct {
stack lfstack
}

spanSetSpinePointer struct #

spanSetSpinePointer represents a pointer to a contiguous block of atomic.Pointer[spanSetBlock].

type spanSetSpinePointer struct {
p unsafe.Pointer
}

special struct #

type special struct {
_ sys.NotInHeap
next *special
offset uintptr
kind byte
}

specialCleanup struct #

The described object has a cleanup set for it.

type specialCleanup struct {
_ sys.NotInHeap
special special
fn *funcval
id uint64
}

specialPinCounter struct #

specialPinCounter tracks whether an object is pinned multiple times.

type specialPinCounter struct {
special special
counter uintptr
}

specialReachable struct #

specialReachable tracks whether an object is reachable on the next GC cycle. This is used by testing.

type specialReachable struct {
special special
done bool
reachable bool
}

specialWeakHandle struct #

The described object has a weak pointer. Weak pointers in the GC have the following invariants: - Strong-to-weak conversions must ensure the strong pointer remains live until the weak handle is installed. This ensures that creating a weak pointer cannot fail. - Weak-to-strong conversions require the weakly-referenced object to be swept before the conversion may proceed. This ensures that weak-to-strong conversions cannot resurrect dead objects by sweeping them before that happens. - Weak handles are unique and canonical for each byte offset into an object that a strong pointer may point to, until an object becomes unreachable. - Weak handles contain nil as soon as an object becomes unreachable the first time, before a finalizer makes it reachable again. New weak handles created after resurrection are newly unique. specialWeakHandle is allocated from non-GC'd memory, so any heap pointers must be specially handled.

type specialWeakHandle struct {
_ sys.NotInHeap
special special
handle *atomic.Uintptr
}

specialfinalizer struct #

The described object has a finalizer set for it. specialfinalizer is allocated from non-GC'd memory, so any heap pointers must be specially handled.

type specialfinalizer struct {
_ sys.NotInHeap
special special
fn *funcval
nret uintptr
fint *_type
ot *ptrtype
}

specialprofile struct #

The described object is being heap profiled.

type specialprofile struct {
_ sys.NotInHeap
special special
b *bucket
}

specialsIter struct #

specialsIter helps iterate over specials lists.

type specialsIter struct {
pprev **special
s *special
}

srcFunc struct #

A srcFunc represents a logical function in the source code. This may correspond to an actual symbol in the binary text, or it may correspond to a source function that has been inlined.

type srcFunc struct {
datap *moduledata
nameOff int32
startLine int32
funcID abi.FuncID
}

stack struct #

Stack describes a Go execution stack. The bounds of the stack are exactly [lo, hi), with no implicit data structures on either side.

type stack struct {
lo uintptr
hi uintptr
}

stackObject struct #

A stackObject represents a variable on the stack that has had its address taken.

type stackObject struct {
_ sys.NotInHeap
off uint32
size uint32
r *stackObjectRecord
left *stackObject
right *stackObject
}

stackObjectBuf struct #

Buffer for stack objects found on a goroutine stack. Must be smaller than or equal to workbuf.

type stackObjectBuf struct {
_ sys.NotInHeap
stackObjectBufHdr
obj [(_WorkbufSize - unsafe.Sizeof(stackObjectBufHdr{})) / unsafe.Sizeof(stackObject{})]stackObject
}

stackObjectBufHdr struct #

type stackObjectBufHdr struct {
_ sys.NotInHeap
workbufhdr
next *stackObjectBuf
}

stackObjectRecord struct #

A stackObjectRecord is generated by the compiler for each stack object in a stack frame. This record must match the generator code in cmd/compile/internal/liveness/plive.go:emitStackObjects.

type stackObjectRecord struct {
off int32
size int32
ptrBytes int32
gcdataoff uint32
}

stackScanState struct #

A stackScanState keeps track of the state used during the GC walk of a goroutine.

type stackScanState struct {
stack stack
conservative bool
buf *stackWorkBuf
freeBuf *stackWorkBuf
cbuf *stackWorkBuf
head *stackObjectBuf
tail *stackObjectBuf
nobjs int
root *stackObject
}

stackWorkBuf struct #

Buffer for pointers found during stack tracing. Must be smaller than or equal to workbuf.

type stackWorkBuf struct {
_ sys.NotInHeap
stackWorkBufHdr
obj [(_WorkbufSize - unsafe.Sizeof(stackWorkBufHdr{})) / goarch.PtrSize]uintptr
}

stackWorkBufHdr struct #

Header declaration must come after the buf declaration above, because of issue #14620.

type stackWorkBufHdr struct {
_ sys.NotInHeap
workbufhdr
next *stackWorkBuf
}

stackfreelist struct #

type stackfreelist struct {
list gclinkptr
size uintptr
}

stackmap struct #

type stackmap struct {
n int32
nbit int32
bytedata [1]byte
}

stackpoolItem struct #

type stackpoolItem struct {
_ sys.NotInHeap
mu mutex
span mSpanList
}

stackt struct #

type stackt struct {
ss_sp *byte
ss_flags int32
pad_cgo_0 [4]byte
ss_size uintptr
}

stackt struct #

type stackt struct {
ss_sp *byte
ss_size uintptr
ss_flags int32
}

stackt struct #

type stackt struct {
ss_sp uintptr
ss_size uintptr
ss_flags int32
}

stackt struct #

type stackt struct {
ss_sp uintptr
ss_size uintptr
ss_flags int32
pad_cgo_0 [4]byte
}

stackt struct #

type stackt struct {
ss_sp *byte
ss_flags int32
pad_cgo_0 [4]byte
ss_size uintptr
}

stackt struct #

type stackt struct {
ss_sp *byte
ss_flags int32
pad_cgo_0 [4]byte
ss_size uintptr
}

stackt struct #

type stackt struct {
ss_sp uintptr
ss_size uintptr
ss_flags int32
__pad [4]int32
pas_cgo_0 [4]byte
}

stackt struct #

type stackt struct {
ss_sp *byte
ss_flags int32
ss_size uintptr
}

stackt struct #

type stackt struct {
ss_sp uintptr
ss_size uintptr
ss_flags int32
pad_cgo_0 [4]byte
}

stackt struct #

type stackt struct {
ss_sp uintptr
ss_size uintptr
ss_flags int32
pad_cgo_0 [4]byte
}

stackt struct #

type stackt struct {
ss_sp uintptr
ss_size uintptr
ss_flags int32
pad_cgo_0 [4]byte
}

stackt struct #

type stackt struct {
ss_sp uintptr
ss_size uintptr
ss_flags int32
}

stackt struct #

type stackt struct {
ss_sp *byte
ss_flags int32
ss_size uintptr
}

stackt struct #

type stackt struct {
ss_sp uintptr
ss_size uintptr
ss_flags int32
pad_cgo_0 [4]byte
}

stackt struct #

type stackt struct {
ss_sp uintptr
ss_size uintptr
ss_flags int32
pad_cgo_0 [4]byte
}

stackt struct #

type stackt struct {
ss_sp *byte
ss_size uintptr
ss_flags int32
pad_cgo_0 [4]byte
}

stackt struct #

type stackt struct {
ss_sp *byte
ss_size uintptr
ss_flags int32
}

stackt struct #

type stackt struct {
ss_sp uintptr
ss_size uintptr
ss_flags int32
pad_cgo_0 [4]byte
}

stackt struct #

type stackt struct {
ss_sp uintptr
ss_size uintptr
ss_flags int32
pad_cgo_0 [4]byte
}

stackt struct #

type stackt struct {
ss_sp *byte
ss_flags int32
pad_cgo_0 [4]byte
ss_size uintptr
}

stackt struct #

type stackt struct {
ss_sp uintptr
ss_size uintptr
ss_flags int32
}

stackt struct #

type stackt struct {
ss_sp uintptr
ss_size uintptr
ss_flags int32
}

stackt struct #

type stackt struct {
ss_sp *byte
ss_flags int32
ss_size uintptr
}

stackt struct #

type stackt struct {
ss_sp uintptr
ss_size uintptr
ss_flags int32
}

stackt struct #

type stackt struct {
ss_sp *byte
ss_size uintptr
ss_flags int32
pad_cgo_0 [4]byte
}

stackt struct #

type stackt struct {
ss_sp *byte
ss_size uintptr
ss_flags int32
pad_cgo_0 [4]byte
}

stackt struct #

type stackt struct {
ss_sp uintptr
ss_size uintptr
ss_flags int32
}

stackt struct #

type stackt struct {
ss_sp *byte
ss_flags int32
pad_cgo_0 [4]byte
ss_size uintptr
}

stackt struct #

type stackt struct {
ss_sp uintptr
ss_size uintptr
ss_flags int32
pad_cgo_0 [4]byte
}

stackt struct #

type stackt struct {
ss_sp uintptr
ss_size uintptr
ss_flags int32
pad_cgo_0 [4]byte
}

stackt struct #

type stackt struct {
ss_sp *byte
ss_flags int32
ss_size uintptr
}

stackt struct #

type stackt struct {
ss_sp uintptr
ss_size uintptr
ss_flags int32
}

stat struct #

type stat struct {
st_dev uint64
st_ino uint64
st_mode uint32
st_nlink uint32
st_uid uint32
st_gid uint32
st_rdev uint64
st_size int64
st_atim timespec
st_mtim timespec
st_ctim timespec
st_blksize int32
pad_cgo_0 [4]byte
st_blocks int64
st_fstype [16]int8
}

statAggregate struct #

statAggregate is the main driver of the metrics implementation. It contains multiple aggregates of runtime statistics, as well as a set of these aggregates that it has populated. The aggregates are populated lazily by its ensure method.

type statAggregate struct {
ensured statDepSet
heapStats heapStatsAggregate
sysStats sysStatsAggregate
cpuStats cpuStatsAggregate
gcStats gcStatsAggregate
}

stkframe struct #

A stkframe holds information about a single physical stack frame.

type stkframe struct {
fn funcInfo
pc uintptr
continpc uintptr
lr uintptr
sp uintptr
fp uintptr
varp uintptr
argp uintptr
}

stringStruct struct #

type stringStruct struct {
str unsafe.Pointer
len int
}

stringStructDWARF struct #

Variant with *byte pointer type for DWARF debugging.

type stringStructDWARF struct {
str *byte
len int
}

subscription struct #

type subscription struct {
_ structs.HostLayout
userdata userdata
u subscriptionUnion
}

subscriptionClock struct #

type subscriptionClock struct {
_ structs.HostLayout
id clockid
timeout timestamp
precision timestamp
flags subclockflags
}

subscriptionFdReadwrite struct #

type subscriptionFdReadwrite struct {
_ structs.HostLayout
fd int32
}

sudog struct #

sudog (pseudo-g) represents a g in a wait list, such as for sending/receiving on a channel. sudog is necessary because the g ↔ synchronization object relation is many-to-many. A g can be on many wait lists, so there may be many sudogs for one g; and many gs may be waiting on the same synchronization object, so there may be many sudogs for one object. sudogs are allocated from a special pool. Use acquireSudog and releaseSudog to allocate and free them.

type sudog struct {
g *g
next *sudog
prev *sudog
elem unsafe.Pointer
acquiretime int64
releasetime int64
ticket uint32
isSelect bool
success bool
waiters uint16
parent *sudog
waitlink *sudog
waittail *sudog
c *hchan
}

suspendGState struct #

type suspendGState struct {
g *g
dead bool
stopped bool
}

sweepLocked struct #

sweepLocked represents sweep ownership of a span.

type sweepLocked struct {
*mspan
}

sweepLocker struct #

sweepLocker acquires sweep ownership of spans.

type sweepLocker struct {
sweepGen uint32
valid bool
}

sweepdata struct #

State of background sweep.

type sweepdata struct {
lock mutex
g *g
parked bool
active activeSweep
centralIndex sweepClass
}

symbolizeCodeContext struct #

type symbolizeCodeContext struct {
pc uintptr
fn *byte
file *byte
line uintptr
off uintptr
res uintptr
}

symbolizeDataContext struct #

type symbolizeDataContext struct {
addr uintptr
heap uintptr
start uintptr
size uintptr
name *byte
file *byte
line uintptr
res uintptr
}

synctestGroup struct #

A synctestGroup is a group of goroutines started by synctest.Run.

type synctestGroup struct {
mu mutex
timers timers
now int64
root *g
waiter *g
waiting bool
total int
running int
active int
}

sysStatsAggregate struct #

sysStatsAggregate represents system memory stats obtained from the runtime. This set of stats is grouped together because they're all relatively cheap to acquire and generally independent of one another and other runtime memory stats. The fact that they may be acquired at different times, especially with respect to heapStatsAggregate, means there could be some skew, but because of these stats are independent, there's no real consistency issue here.

type sysStatsAggregate struct {
stacksSys uint64
mSpanSys uint64
mSpanInUse uint64
mCacheSys uint64
mCacheInUse uint64
buckHashSys uint64
gcMiscSys uint64
otherSys uint64
heapGoal uint64
gcCyclesDone uint64
gcCyclesForced uint64
}

sysmontick struct #

type sysmontick struct {
schedtick uint32
syscalltick uint32
schedwhen int64
syscallwhen int64
}

systeminfo struct #

type systeminfo struct {
anon0 [4]byte
dwpagesize uint32
lpminimumapplicationaddress *byte
lpmaximumapplicationaddress *byte
dwactiveprocessormask uintptr
dwnumberofprocessors uint32
dwprocessortype uint32
dwallocationgranularity uint32
wprocessorlevel uint16
wprocessorrevision uint16
}

textsect struct #

type textsect struct {
vaddr uintptr
end uintptr
baseaddr uintptr
}

tforkt struct #

type tforkt struct {
tf_tcb unsafe.Pointer
tf_tid *int32
tf_stack uintptr
}

tforkt struct #

type tforkt struct {
tf_tcb unsafe.Pointer
tf_tid *int32
tf_stack uintptr
}

tforkt struct #

type tforkt struct {
tf_tcb unsafe.Pointer
tf_tid *int32
tf_stack uintptr
}

tforkt struct #

type tforkt struct {
tf_tcb unsafe.Pointer
tf_tid *int32
tf_stack uintptr
}

tforkt struct #

type tforkt struct {
tf_tcb unsafe.Pointer
tf_tid *int32
tf_stack uintptr
}

tforkt struct #

type tforkt struct {
tf_tcb unsafe.Pointer
tf_tid *int32
tf_stack uintptr
}

tforkt struct #

type tforkt struct {
tf_tcb unsafe.Pointer
tf_tid *int32
tf_stack uintptr
}

thrparam struct #

type thrparam struct {
start_func uintptr
arg unsafe.Pointer
stack_base uintptr
stack_size uintptr
tls_base unsafe.Pointer
tls_size uintptr
child_tid unsafe.Pointer
parent_tid *int32
flags int32
rtp *rtprio
spare [3]uintptr
}

thrparam struct #

type thrparam struct {
start_func uintptr
arg unsafe.Pointer
stack_base uintptr
stack_size uintptr
tls_base unsafe.Pointer
tls_size uintptr
child_tid unsafe.Pointer
parent_tid *int64
flags int32
pad_cgo_0 [4]byte
rtp *rtprio
spare [3]uintptr
}

thrparam struct #

type thrparam struct {
start_func uintptr
arg unsafe.Pointer
stack_base uintptr
stack_size uintptr
tls_base unsafe.Pointer
tls_size uintptr
child_tid unsafe.Pointer
parent_tid *int32
flags int32
rtp *rtprio
spare [3]uintptr
}

thrparam struct #

type thrparam struct {
start_func uintptr
arg unsafe.Pointer
stack_base uintptr
stack_size uintptr
tls_base unsafe.Pointer
tls_size uintptr
child_tid unsafe.Pointer
parent_tid *int64
flags int32
pad_cgo_0 [4]byte
rtp *rtprio
spare [3]uintptr
}

thrparam struct #

type thrparam struct {
start_func uintptr
arg unsafe.Pointer
stack_base uintptr
stack_size uintptr
tls_base unsafe.Pointer
tls_size uintptr
child_tid unsafe.Pointer
parent_tid *int64
flags int32
pad_cgo_0 [4]byte
rtp *rtprio
spare [3]uintptr
}

ticksType struct #

type ticksType struct {
lock mutex
startTicks int64
startTime int64
val atomic.Int64
}

timeHistogram struct #

timeHistogram represents a distribution of durations in nanoseconds. The accuracy and range of the histogram is defined by the timeHistSubBucketBits and timeHistNumBuckets constants. It is an HDR histogram with exponentially-distributed buckets and linearly distributed sub-buckets. The histogram is safe for concurrent reads and writes.

type timeHistogram struct {
counts [timeHistTotalBuckets - 2]atomic.Uint64
underflow atomic.Uint64
overflow atomic.Uint64
}

timeTimer struct #

A timeTimer is a runtime-allocated time.Timer or time.Ticker with the additional runtime state following it. The runtime state is inaccessible to package time.

type timeTimer struct {
c unsafe.Pointer
init bool
timer
}

timeoutEvent struct #

type timeoutEvent struct {
id int32
time int64
}

timer struct #

A timer is a potentially repeating trigger for calling t.f(t.arg, t.seq). Timers are allocated by client code, often as part of other data structures. Each P has a heap of pointers to timers that it manages. A timer is expected to be used by only one client goroutine at a time, but there will be concurrent access by the P managing that timer. Timer accesses are protected by the lock t.mu, with a snapshot of t's state bits published in t.astate to enable certain fast paths to make decisions about a timer without acquiring the lock.

type timer struct {
mu mutex
astate atomic.Uint8
state uint8
isChan bool
isFake bool
blocked uint32
when int64
period int64
f func(arg any, seq uintptr, delay int64)
arg any
seq uintptr
ts *timers
sendLock mutex
isSending atomic.Int32
}

timerWhen struct #

type timerWhen struct {
timer *timer
when int64
}

timers struct #

A timers is a per-P set of timers.

type timers struct {
mu mutex
heap []timerWhen
len atomic.Uint32
zombies atomic.Int32
raceCtx uintptr
minWhenHeap atomic.Int64
minWhenModified atomic.Int64
syncGroup *synctestGroup
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int64
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int32
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int64
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int64
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int64
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int64
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int64
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int64
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int64
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int64
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int32
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int64
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int64
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int64
}

timespec struct #

type timespec struct {
tv_sec int32
tv_nsec int32
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int64
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int64
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int32
_ [4]byte
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int32
pad_cgo_0 [4]byte
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int64
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int64
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int64
}

timespec struct #

type timespec struct {
tv_sec int32
tv_nsec int32
}

timespec struct #

type timespec struct {
tv_sec int32
tv_nsec int32
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int64
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int32
pad_cgo_0 [4]byte
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int64
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int64
}

timespec struct #

type timespec struct {
tv_sec int32
tv_nsec int32
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int64
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int64
}

timespec struct #

type timespec struct {
tv_sec int64
tv_nsec int64
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int64
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int64
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int64
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int64
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int64
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int64
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int64
}

timeval struct #

type timeval struct {
tv_sec int32
tv_usec int32
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int32
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int32
pad_cgo_0 [4]byte
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int64
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int64
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int64
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int64
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int64
}

timeval struct #

type timeval struct {
tv_sec int32
tv_usec int32
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int32
pad_cgo_0 [4]byte
}

timeval struct #

type timeval struct {
tv_sec int32
tv_usec int32
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int32
pad_cgo_0 [4]byte
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int32
_ [4]byte
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int32
pad_cgo_0 [4]byte
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int64
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int32
pad_cgo_0 [4]byte
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int64
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int64
}

timeval struct #

type timeval struct {
tv_sec int32
tv_usec int32
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int32
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int32
pad_cgo_0 [4]byte
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int32
_ [4]byte
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int64
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int64
}

timeval struct #

type timeval struct {
tv_sec int64
tv_usec int64
}

tos struct #

type tos struct {
prof struct{...}
cyclefreq uint64
kcycles int64
pcycles int64
pid uint32
clock uint32
}

traceAdvancerState struct #

type traceAdvancerState struct {
timer *wakeableSleep
done chan struct{...}
}

traceBuf struct #

traceBuf is per-M tracing buffer. TODO(mknyszek): Rename traceBuf to traceBatch, since they map 1:1 with event batches.

type traceBuf struct {
_ sys.NotInHeap
traceBufHeader
arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte
}

traceBufHeader struct #

traceBufHeader is per-P tracing buffer.

type traceBufHeader struct {
link *traceBuf
lastTime traceTime
pos int
lenPos int
}

traceBufQueue struct #

traceBufQueue is a FIFO of traceBufs.

type traceBufQueue struct {
head *traceBuf
tail *traceBuf
}

traceEventWriter struct #

traceEventWriter is the high-level API for writing trace events. See the comment on traceWriter about style for more details as to why this type and its methods are structured the way they are.

type traceEventWriter struct {
tl traceLocker
}

traceFrame struct #

type traceFrame struct {
PC uintptr
funcID uint64
fileID uint64
line uint64
}

traceLocker struct #

traceLocker represents an M writing trace events. While a traceLocker value is valid, the tracer observes all operations on the G/M/P or trace events being written as happening atomically.

type traceLocker struct {
mp *m
gen uintptr
}

traceMap struct #

type traceMap struct {
root atomic.UnsafePointer
_ cpu.CacheLinePad
seq atomic.Uint64
_ cpu.CacheLinePad
mem traceRegionAlloc
}

traceMapNode struct #

traceMapNode is an implementation of a lock-free append-only hash-trie (a trie of the hash bits). Key features: - 4-ary trie. Child nodes are indexed by the upper 2 (remaining) bits of the hash. For example, top level uses bits [63:62], next level uses [61:60] and so on. - New nodes are placed at the first empty level encountered. - When the first child is added to a node, the existing value is not moved into a child. This means that you must check the key at each level, not just at the leaf. - No deletion or rebalancing. - Intentionally devolves into a linked list on hash collisions (the hash bits will all get shifted out during iteration, and new nodes will just be appended to the 0th child).

type traceMapNode struct {
_ sys.NotInHeap
children [4]atomic.UnsafePointer
hash uintptr
id uint64
data []byte
}

traceRegionAlloc struct #

traceRegionAlloc is a thread-safe region allocator. It holds a linked list of traceRegionAllocBlock.

type traceRegionAlloc struct {
lock mutex
dropping atomic.Bool
current atomic.UnsafePointer
full *traceRegionAllocBlock
}

traceRegionAllocBlock struct #

traceRegionAllocBlock is a block in traceRegionAlloc. traceRegionAllocBlock is allocated from non-GC'd memory, so it must not contain heap pointers. Writes to pointers to traceRegionAllocBlocks do not need write barriers.

type traceRegionAllocBlock struct {
_ sys.NotInHeap
traceRegionAllocBlockHeader
data [traceRegionAllocBlockData]byte
}

traceRegionAllocBlockHeader struct #

type traceRegionAllocBlockHeader struct {
next *traceRegionAllocBlock
off atomic.Uintptr
}

traceSchedResourceState struct #

traceSchedResourceState is shared state for scheduling resources (i.e. fields common to both Gs and Ps).

type traceSchedResourceState struct {
statusTraced [3]atomic.Uint32
seq [2]uint64
}

traceStackTable struct #

traceStackTable maps stack traces (arrays of PC's) to unique uint32 ids. It is lock-free for reading.

type traceStackTable struct {
tab traceMap
}

traceStringTable struct #

traceStringTable is map of string -> unique ID that also manages writing strings out into the trace.

type traceStringTable struct {
lock mutex
buf *traceBuf
tab traceMap
}

traceTypeTable struct #

traceTypeTable maps stack traces (arrays of PC's) to unique uint32 ids. It is lock-free for reading.

type traceTypeTable struct {
tab traceMap
}

traceWriter struct #

traceWriter is the interface for writing all trace data. This type is passed around as a value, and all of its methods return a new traceWriter. This allows for chaining together calls in a fluent-style API. This is partly stylistic, and very slightly for performance, since the compiler can destructure this value and pass it between calls as just regular arguments. However, this style is not load-bearing, and we can change it if it's deemed too error-prone.

type traceWriter struct {
traceLocker
exp traceExperiment
*traceBuf
}

tracestat struct #

type tracestat struct {
active bool
id uint64
allocs uint64
bytes uint64
}

typeCacheBucket struct #

type typeCacheBucket struct {
t [typeCacheAssoc]*_type
}

typePointers struct #

typePointers is an iterator over the pointers in a heap object. Iteration through this type implements the tiling algorithm described at the top of this file.

type typePointers struct {
elem uintptr
addr uintptr
mask uintptr
typ *_type
}

ucontext struct #

type ucontext struct {
__sc_onstack int32
pad_cgo_0 [4]byte
uc_sigmask sigset
__sc_error int32
pad_cgo_1 [4]byte
uc_mcontext context64
uc_link *ucontext
uc_stack stackt
__extctx uintptr
__extctx_magic int32
__pad int32
}

ucontext struct #

type ucontext struct {
uc_flags uint64
uc_link *ucontext
uc_stack stackt
uc_sigmask uint64
_pad [(1024 - 64) / 8]byte
_pad2 [8]byte
uc_mcontext sigcontext
}

ucontext struct #

type ucontext struct {
uc_flags uint64
uc_link *ucontext
uc_stack stackt
uc_sigmask usigset
uc_x_unused [0]uint8
uc_pad_cgo_0 [8]byte
uc_mcontext sigcontext
}

ucontext struct #

type ucontext struct {
uc_sigmask sigset
uc_mcontext mcontext
uc_link *ucontext
uc_stack stackt
uc_flags int32
__spare__ [4]int32
pad_cgo_0 [12]byte
}

ucontext struct #

type ucontext struct {
uc_flags uint64
uc_link *ucontext
uc_stack stackt
uc_sigmask uint64
__unused [15]uint64
uc_mcontext sigcontext
}

ucontext struct #

type ucontext struct {
uc_onstack int32
uc_sigmask uint32
uc_stack stackt
uc_link *ucontext
uc_mcsize uint64
uc_mcontext *mcontext64
}

ucontext struct #

type ucontext struct {
uc_sigmask sigset
uc_mcontext mcontext
uc_link *ucontext
uc_stack stackt
uc_flags int32
__spare__ [4]int32
}

ucontext struct #

type ucontext struct {
uc_flags uint64
uc_link *ucontext
uc_stack stackt
uc_mcontext sigcontext
uc_sigmask uint64
}

ucontext struct #

type ucontext struct {
uc_sigmask sigset
uc_mcontext mcontext
uc_link *ucontext
uc_stack stackt
uc_flags int32
__spare__ [4]int32
pad_cgo_0 [12]byte
}

ucontext struct #

type ucontext struct {
uc_flags uint64
uc_link *ucontext
uc_stack stackt
uc_sigmask uint64
__unused [15]uint64
uc_mcontext sigcontext
}

ucontext struct #

type ucontext struct {
uc_flags uint64
uc_link *ucontext
uc_stack stackt
uc_sigmask usigset
uc_x__unused [0]uint8
uc_pad_cgo_0 [8]byte
uc_mcontext sigcontext
}

ucontext struct #

type ucontext struct {
uc_sigmask sigset
uc_mcontext mcontext
uc_link *ucontext
uc_stack stackt
uc_flags int32
__spare__ [4]int32
pad_cgo_0 [12]byte
}

ucontext struct #

type ucontext struct {
uc_flags uint32
uc_link *ucontext
uc_stack stackt
Pad_cgo_0 [4]byte
uc_mcontext sigcontext
uc_sigmask [4]uint32
}

ucontext struct #

type ucontext struct {
uc_flags uint32
uc_link *ucontext
uc_stack stackt
uc_mcontext sigcontext
uc_sigmask uint32
}

ucontext struct #

type ucontext struct {
uc_flags uint64
uc_link *ucontext
uc_stack stackt
uc_mcontext mcontext
uc_sigmask usigset
__fpregs_mem fpstate
}

ucontext struct #

type ucontext struct {
uc_flags uint32
uc_link *ucontext
uc_stack stackt
uc_mcontext sigcontext
uc_sigmask uint32
__unused [31]int32
uc_regspace [128]uint32
}

ucontext struct #

type ucontext struct {
uc_sigmask sigset
pad_cgo_0 [48]byte
uc_mcontext mcontext
uc_link *ucontext
uc_stack stackt
__spare__ [8]int32
}

ucontext struct #

type ucontext struct {
uc_sigmask sigset
uc_mcontext mcontext
uc_link *ucontext
uc_stack stackt
uc_flags int32
__spare__ [4]int32
pad_cgo_0 [12]byte
}

ucontext struct #

type ucontext struct {
uc_flags uint64
uc_link *ucontext
uc_stack stackt
uc_mcontext sigcontext
uc_sigmask uint64
}

ucontext struct #

type ucontext struct {
uc_onstack int32
uc_sigmask uint32
uc_stack stackt
uc_link *ucontext
uc_mcsize uint64
uc_mcontext *mcontext64
}

ucontext struct #

type ucontext struct {
uc_flags uint64
uc_link *ucontext
uc_sigmask sigset
uc_stack stackt
pad_cgo_0 [8]byte
uc_mcontext mcontext
uc_filler [5]int64
pad_cgo_1 [8]byte
}

ucontextt struct #

type ucontextt struct {
uc_flags uint32
pad_cgo_0 [4]byte
uc_link *ucontextt
uc_sigmask sigset
uc_stack stackt
uc_mcontext mcontextt
}

ucontextt struct #

type ucontextt struct {
uc_flags uint32
uc_link *ucontextt
uc_sigmask sigset
uc_stack stackt
_ [4]byte
uc_mcontext mcontextt
__uc_pad [2]int32
}

ucontextt struct #

type ucontextt struct {
uc_flags uint32
uc_link *ucontextt
uc_sigmask sigset
uc_stack stackt
_ [4]byte
uc_mcontext mcontextt
__uc_pad [2]int32
}

ucontextt struct #

type ucontextt struct {
uc_flags uint32
uc_link *ucontextt
uc_sigmask sigset
uc_stack stackt
uc_mcontext mcontextt
__uc_pad [4]int32
}

umtx_time struct #

type umtx_time struct {
_timeout timespec
_flags uint32
_clockid uint32
}

umtx_time struct #

type umtx_time struct {
_timeout timespec
_flags uint32
_clockid uint32
}

umtx_time struct #

type umtx_time struct {
_timeout timespec
_flags uint32
_clockid uint32
}

umtx_time struct #

type umtx_time struct {
_timeout timespec
_flags uint32
_clockid uint32
}

umtx_time struct #

type umtx_time struct {
_timeout timespec
_flags uint32
_clockid uint32
}

unwinder struct #

An unwinder iterates the physical stack frames of a Go stack. Typical use of an unwinder looks like: var u unwinder for u.init(gp, 0); u.valid(); u.next() { // ... use frame info in u ... } Implementation note: This is carefully structured to be pointer-free because tracebacks happen in places that disallow write barriers (e.g., signals). Even if this is stack-allocated, its pointer-receiver methods don't know that their receiver is on the stack, so they still emit write barriers. Here we address that by carefully avoiding any pointers in this type. Another approach would be to split this into a mutable part that's passed by pointer but contains no pointers itself and an immutable part that's passed and returned by value and can contain pointers. We could potentially hide that we're doing that in trivial methods that are inlined into the caller that has the stack allocation, but that's fragile.

type unwinder struct {
frame stkframe
g guintptr
cgoCtxt int
calleeFuncID abi.FuncID
flags unwindFlags
}

ureg struct #

type ureg struct {
di uint32
si uint32
bp uint32
nsp uint32
bx uint32
dx uint32
cx uint32
ax uint32
gs uint32
fs uint32
es uint32
ds uint32
trap uint32
ecode uint32
pc uint32
cs uint32
flags uint32
sp uint32
ss uint32
}

ureg struct #

type ureg struct {
ax uint64
bx uint64
cx uint64
dx uint64
si uint64
di uint64
bp uint64
r8 uint64
r9 uint64
r10 uint64
r11 uint64
r12 uint64
r13 uint64
r14 uint64
r15 uint64
ds uint16
es uint16
fs uint16
gs uint16
_type uint64
error uint64
ip uint64
cs uint64
flags uint64
sp uint64
ss uint64
}

ureg struct #

type ureg struct {
r0 uint32
r1 uint32
r2 uint32
r3 uint32
r4 uint32
r5 uint32
r6 uint32
r7 uint32
r8 uint32
r9 uint32
r10 uint32
r11 uint32
r12 uint32
sp uint32
link uint32
trap uint32
psr uint32
pc uint32
}

userArena struct #

type userArena struct {
fullList *mspan
active *mspan
refs []unsafe.Pointer
defunct atomic.Bool
}

user_fpregs_struct struct #

type user_fpregs_struct struct {
f [528]byte
}

user_regs_struct struct #

type user_regs_struct struct {
pc uint64
ra uint64
sp uint64
gp uint64
tp uint64
t0 uint64
t1 uint64
t2 uint64
s0 uint64
s1 uint64
a0 uint64
a1 uint64
a2 uint64
a3 uint64
a4 uint64
a5 uint64
a6 uint64
a7 uint64
s2 uint64
s3 uint64
s4 uint64
s5 uint64
s6 uint64
s7 uint64
s8 uint64
s9 uint64
s10 uint64
s11 uint64
t3 uint64
t4 uint64
t5 uint64
t6 uint64
}

usigactiont struct #

type usigactiont struct {
__sigaction_u [8]byte
sa_mask uint32
sa_flags int32
}

usigactiont struct #

type usigactiont struct {
__sigaction_u [8]byte
sa_mask uint32
sa_flags int32
}

usigset struct #

type usigset struct {
__val [16]uint64
}

usigset struct #

type usigset struct {
us_x__val [16]uint64
}

usigset struct #

type usigset struct {
val [16]uint64
}

usigset struct #

type usigset struct {
__val [16]uint64
}

vdsoInfo struct #

type vdsoInfo struct {
valid bool
loadAddr uintptr
loadOffset uintptr
symtab *[vdsoSymTabSize]elfSym
symstrings *[vdsoSymStringsSize]byte
chain []uint32
bucket []uint32
symOff uint32
isGNUHash bool
versym *[vdsoVerSymSize]uint16
verdef *elfVerdef
}

vdsoSymbolKey struct #

type vdsoSymbolKey struct {
name string
symHash uint32
gnuHash uint32
ptr *uintptr
}

vdsoTimehands struct #

type vdsoTimehands struct {
algo uint32
gen uint32
scale uint64
offset_count uint32
counter_mask uint32
offset bintime
boottime bintime
x86_shift uint32
x86_hpet_idx uint32
res [6]uint32
}

vdsoTimehands struct #

type vdsoTimehands struct {
algo uint32
gen uint32
scale uint64
offset_count uint32
counter_mask uint32
offset bintime
boottime bintime
physical uint32
res [7]uint32
}

vdsoTimehands struct #

type vdsoTimehands struct {
algo uint32
gen uint32
scale uint64
offset_count uint32
counter_mask uint32
offset bintime
boottime bintime
physical uint32
res [7]uint32
}

vdsoTimehands struct #

type vdsoTimehands struct {
algo uint32
gen uint32
scale uint64
offset_count uint32
counter_mask uint32
offset bintime
boottime bintime
x86_shift uint32
x86_hpet_idx uint32
res [6]uint32
}

vdsoTimehands struct #

type vdsoTimehands struct {
algo uint32
gen uint32
scale uint64
offset_count uint32
counter_mask uint32
offset bintime
boottime bintime
physical uint32
res [7]uint32
}

vdsoTimekeep struct #

type vdsoTimekeep struct {
ver uint32
enabled uint32
current uint32
pad_cgo_0 [4]byte
}

vdsoTimekeep struct #

type vdsoTimekeep struct {
ver uint32
enabled uint32
current uint32
pad_cgo_0 [4]byte
}

vdsoTimekeep struct #

type vdsoTimekeep struct {
ver uint32
enabled uint32
current uint32
pad_cgo_0 [4]byte
}

vdsoTimekeep struct #

type vdsoTimekeep struct {
ver uint32
enabled uint32
current uint32
pad_cgo_0 [4]byte
}

vdsoTimekeep struct #

type vdsoTimekeep struct {
ver uint32
enabled uint32
current uint32
}

vdsoVersionKey struct #

type vdsoVersionKey struct {
version string
verHash uint32
}

vreg struct #

type vreg struct {
u [4]uint32
}

vreg struct #

type vreg struct {
u [4]uint32
}

waitq struct #

type waitq struct {
first *sudog
last *sudog
}

wakeableSleep struct #

wakeableSleep manages a wakeable goroutine sleep. Users of this type must call init before first use and close to free up resources. Once close is called, init must be called before another use.

type wakeableSleep struct {
timer *timer
lock mutex
wakeup chan struct{...}
}

wbBuf struct #

wbBuf is a per-P buffer of pointers queued by the write barrier. This buffer is flushed to the GC workbufs when it fills up and on various GC transitions. This is closely related to a "sequential store buffer" (SSB), except that SSBs are usually used for maintaining remembered sets, while this is used for marking.

type wbBuf struct {
next uintptr
end uintptr
buf [wbBufEntries]uintptr
}

winCallback struct #

winCallback records information about a registered Go callback.

type winCallback struct {
fn *funcval
retPop uintptr
abiMap abiDesc
}

winCallbackKey struct #

type winCallbackKey struct {
fn *funcval
cdecl bool
}

winlibcall struct #

winlibcall is not implemented on non-Windows systems, but it is used in non-OS-specific parts of the runtime. Define it as an empty struct to avoid wasting stack space.

type winlibcall struct {

}

workType struct #

type workType struct {
full lfstack
_ cpu.CacheLinePad
empty lfstack
_ cpu.CacheLinePad
wbufSpans struct{...}
_ uint32
bytesMarked uint64
markrootNext uint32
markrootJobs uint32
nproc uint32
tstart int64
nwait uint32
nDataRoots int
nBSSRoots int
nSpanRoots int
nStackRoots int
baseData uint32
baseBSS uint32
baseSpans uint32
baseStacks uint32
baseEnd uint32
stackRoots []*g
startSema uint32
markDoneSema uint32
bgMarkDone uint32
mode gcMode
userForced bool
initialHeapLive uint64
assistQueue struct{...}
sweepWaiters struct{...}
strongFromWeak struct{...}
cycles atomic.Uint32
stwprocs int32
maxprocs int32
tSweepTerm int64
tMark int64
tMarkTerm int64
tEnd int64
pauseNS int64
heap0 uint64
heap1 uint64
heap2 uint64
cpuStats
}

workbuf struct #

type workbuf struct {
_ sys.NotInHeap
workbufhdr
obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / goarch.PtrSize]uintptr
}

workbufhdr struct #

type workbufhdr struct {
node lfnode
nobj int
}

worldStop struct #

worldStop provides context from the stop-the-world required by the start-the-world.

type worldStop struct {
reason stwReason
startedStopping int64
finishedStopping int64
stoppingCPUTime int64
}

writeUserArenaHeapBits struct #

type writeUserArenaHeapBits struct {
offset uintptr
mask uintptr
valid uintptr
low uintptr
}

xmmreg struct #

type xmmreg struct {
element [4]uint32
}

xmmreg struct #

type xmmreg struct {
element [4]uint32
}

xmmreg1 struct #

type xmmreg1 struct {
element [4]uint32
}

Functions

ASanRead function #

Public address sanitizer API.

func ASanRead(addr unsafe.Pointer, len int)

ASanWrite function #

func ASanWrite(addr unsafe.Pointer, len int)

Add method #

go:nosplit

func (bt *bintime) Add(bt2 *bintime)

AddCleanup function #

AddCleanup attaches a cleanup function to ptr. Some time after ptr is no longer reachable, the runtime will call cleanup(arg) in a separate goroutine. A typical use is that ptr is an object wrapping an underlying resource (e.g., a File object wrapping an OS file descriptor), arg is the underlying resource (e.g., the OS file descriptor), and the cleanup function releases the underlying resource (e.g., by calling the close system call). There are few constraints on ptr. In particular, multiple cleanups may be attached to the same pointer, or to different pointers within the same allocation. If ptr is reachable from cleanup or arg, ptr will never be collected and the cleanup will never run. As a protection against simple cases of this, AddCleanup panics if arg is equal to ptr. There is no specified order in which cleanups will run. In particular, if several objects point to each other and all become unreachable at the same time, their cleanups all become eligible to run and can run in any order. This is true even if the objects form a cycle. Cleanups run concurrently with any user-created goroutines. Cleanups may also run concurrently with one another (unlike finalizers). If a cleanup function must run for a long time, it should create a new goroutine to avoid blocking the execution of other cleanups. If ptr has both a cleanup and a finalizer, the cleanup will only run once it has been finalized and becomes unreachable without an associated finalizer. The cleanup(arg) call is not always guaranteed to run; in particular it is not guaranteed to run before program exit. Cleanups are not guaranteed to run if the size of T is zero bytes, because it may share same address with other zero-size objects in memory. See https://go.dev/ref/spec#Size_and_alignment_guarantees. It is not guaranteed that a cleanup will run for objects allocated in initializers for package-level variables. Such objects may be linker-allocated, not heap-allocated. 
Note that because cleanups may execute arbitrarily far into the future after an object is no longer referenced, the runtime is allowed to perform a space-saving optimization that batches objects together in a single allocation slot. The cleanup for an unreferenced object in such an allocation may never run if it always exists in the same batch as a referenced object. Typically, this batching only happens for tiny (on the order of 16 bytes or less) and pointer-free objects. A cleanup may run as soon as an object becomes unreachable. In order to use cleanups correctly, the program must ensure that the object is reachable until it is safe to run its cleanup. Objects stored in global variables, or that can be found by tracing pointers from a global variable, are reachable. A function argument or receiver may become unreachable at the last point where the function mentions it. To ensure a cleanup does not get called prematurely, pass the object to the [KeepAlive] function after the last point where the object must remain reachable.

func AddCleanup(ptr *T, cleanup func(S), arg S) Cleanup

AddX method #

go:nosplit

func (bt *bintime) AddX(x uint64)

Addr method #

Addr returns the memory address where a fault occurred. The address provided is best-effort. The veracity of the result may depend on the platform. Errors providing this method will only be returned as a result of using [runtime/debug.SetPanicOnFault].

func (e errorAddressString) Addr() uintptr

BlockProfile function #

BlockProfile returns n, the number of records in the current blocking profile. If len(p) >= n, BlockProfile copies the profile into p and returns n, true. If len(p) < n, BlockProfile does not change p and returns n, false. Most clients should use the [runtime/pprof] package or the [testing] package's -test.blockprofile flag instead of calling BlockProfile directly.

func BlockProfile(p []BlockProfileRecord) (n int, ok bool)

Breakpoint function #

Breakpoint executes a breakpoint trap.

func Breakpoint()

CPUProfile function #

CPUProfile panics. It formerly provided raw access to chunks of a pprof-format profile generated by the runtime. The details of generating that format have changed, so this functionality has been removed. Deprecated: Use the [runtime/pprof] package, or the handlers in the [net/http/pprof] package, or the [testing] package's -test.cpuprofile flag instead.

func CPUProfile() []byte

Caller function #

Caller reports file and line number information about function invocations on the calling goroutine's stack. The argument skip is the number of stack frames to ascend, with 0 identifying the caller of Caller. (For historical reasons the meaning of skip differs between Caller and [Callers].) The return values report the program counter, the file name (using forward slashes as path separator, even on Windows), and the line number within the file of the corresponding call. The boolean ok is false if it was not possible to recover the information.

func Caller(skip int) (pc uintptr, file string, line int, ok bool)

Callers function #

Callers fills the slice pc with the return program counters of function invocations on the calling goroutine's stack. The argument skip is the number of stack frames to skip before recording in pc, with 0 identifying the frame for Callers itself and 1 identifying the caller of Callers. It returns the number of entries written to pc. To translate these PCs into symbolic information such as function names and line numbers, use [CallersFrames]. CallersFrames accounts for inlined functions and adjusts the return program counters into call program counters. Iterating over the returned slice of PCs directly is discouraged, as is using [FuncForPC] on any of the returned PCs, since these cannot account for inlining or return program counter adjustment.

func Callers(skip int, pc []uintptr) int

CallersFrames function #

CallersFrames takes a slice of PC values returned by [Callers] and prepares to return function/file/line information. Do not change the slice until you are done with the [Frames].

func CallersFrames(callers []uintptr) *Frames

Clear method #

Clear attempts to store minOffAddr in atomicOffAddr. It may fail if a marked value is placed in the box in the meanwhile.

func (b *atomicOffAddr) Clear()

CompareAndSwap method #

func (p *goroutineProfileStateHolder) CompareAndSwap(old goroutineProfileState, new goroutineProfileState) bool

Entry method #

Entry returns the entry address of the function.

func (f *Func) Entry() uintptr

Error method #

func (e plainError) Error() string

Error method #

func (*PanicNilError) Error() string

Error method #

func (e boundsError) Error() string

Error method #

func (e errorAddressString) Error() string

Error method #

func (e *TypeAssertionError) Error() string

Error method #

func (e errorString) Error() string

FileLine method #

FileLine returns the file name and line number of the source code corresponding to the program counter pc. The result will not be accurate if pc is not a program counter within f.

func (f *Func) FileLine(pc uintptr) (file string, line int)

FuncForPC function #

FuncForPC returns a *[Func] describing the function that contains the given program counter address, or else nil. If pc represents multiple functions because of inlining, it returns the *Func describing the innermost function, but with an entry of the outermost function.

func FuncForPC(pc uintptr) *Func

GC function #

GC runs a garbage collection and blocks the caller until the garbage collection is complete. It may also block the entire program.

func GC()

GCActive method #

GCActive traces a GCActive event. Must be emitted by an actively running goroutine on an active P. This restriction can be changed easily and only depends on where it's currently called.

func (tl traceLocker) GCActive()

GCDone method #

GCDone traces a GCEnd event. Must be emitted by an actively running goroutine on an active P. This restriction can be changed easily and only depends on where it's currently called.

func (tl traceLocker) GCDone()

GCMarkAssistDone method #

GCMarkAssistDone emits a MarkAssistEnd event.

func (tl traceLocker) GCMarkAssistDone()

GCMarkAssistStart method #

GCMarkAssistStart emits a MarkAssistBegin event.

func (tl traceLocker) GCMarkAssistStart()

GCStart method #

GCStart traces a GCBegin event. Must be emitted by an actively running goroutine on an active P. This restriction can be changed easily and only depends on where it's currently called.

func (tl traceLocker) GCStart()

GCSweepDone method #

GCSweepDone finishes tracing a sweep loop. If any memory was swept (i.e. traceGCSweepSpan emitted an event) then this will emit a GCSweepEnd event. Must be called with a valid P.

func (tl traceLocker) GCSweepDone()

GCSweepSpan method #

GCSweepSpan traces the sweep of a single span. If this is the first span swept since traceGCSweepStart was called, this will emit a GCSweepBegin event. This may be called outside a traceGCSweepStart/traceGCSweepDone pair; however, it will not emit any trace events in this case. Must be called with a valid P.

func (tl traceLocker) GCSweepSpan(bytesSwept uintptr)

GCSweepStart method #

GCSweepStart prepares to trace a sweep loop. This does not emit any events until traceGCSweepSpan is called. GCSweepStart must be paired with traceGCSweepDone and there must be no preemption points between these two calls. Must be called with a valid P.

func (tl traceLocker) GCSweepStart()

GOMAXPROCS function #

GOMAXPROCS sets the maximum number of CPUs that can be executing simultaneously and returns the previous setting. It defaults to the value of [runtime.NumCPU]. If n < 1, it does not change the current setting. This call will go away when the scheduler improves.

func GOMAXPROCS(n int) int

GOROOT function #

GOROOT returns the root of the Go tree. It uses the GOROOT environment variable, if set at process start, or else the root used during the Go build. Deprecated: The root used during the Go build will not be meaningful if the binary is copied to another machine. Use the system path to locate the “go” binary, and use “go env GOROOT” to find its GOROOT.

func GOROOT() string

GoCreate method #

GoCreate emits a GoCreate event.

func (tl traceLocker) GoCreate(newg *g, pc uintptr, blocked bool)

GoCreateSyscall method #

GoCreateSyscall indicates that a goroutine has transitioned from dead to GoSyscall. Unlike GoCreate, the caller must be running on gp. This occurs when C code calls into Go. On pthread platforms it occurs only when a C thread calls into Go code for the first time.

func (tl traceLocker) GoCreateSyscall(gp *g)

GoDestroySyscall method #

GoDestroySyscall indicates that a goroutine has transitioned from GoSyscall to dead. Must not have a P. This occurs when Go code returns back to C. On pthread platforms it occurs only when the C thread is destroyed.

func (tl traceLocker) GoDestroySyscall()

GoEnd method #

GoEnd emits a GoDestroy event. TODO(mknyszek): Rename this to GoDestroy.

func (tl traceLocker) GoEnd()

GoPark method #

GoPark emits a GoBlock event with the provided reason. TODO(mknyszek): Replace traceBlockReason with waitReason. It's silly that we have both, and waitReason is way more descriptive.

func (tl traceLocker) GoPark(reason traceBlockReason, skip int)

GoPreempt method #

GoPreempt emits a GoStop event with a GoPreempted reason.

func (tl traceLocker) GoPreempt()

GoSched method #

GoSched emits a GoStop event with a GoSched reason.

func (tl traceLocker) GoSched()

GoStart method #

GoStart emits a GoStart event. Must be called with a valid P.

func (tl traceLocker) GoStart()

GoStop method #

GoStop emits a GoStop event with the provided reason.

func (tl traceLocker) GoStop(reason traceGoStopReason)

GoSwitch method #

GoSwitch emits a GoSwitch event. If destroy is true, the calling goroutine is simultaneously being destroyed.

func (tl traceLocker) GoSwitch(nextg *g, destroy bool)

GoSysCall method #

GoSysCall emits a GoSyscallBegin event. Must be called with a valid P.

func (tl traceLocker) GoSysCall()

GoSysExit method #

GoSysExit emits a GoSyscallEnd event, possibly along with a GoSyscallBlocked event if lostP is true. lostP must be true in all cases that a goroutine loses its P during a syscall. This means it's not sufficient to check if it has no P. In particular, it needs to be true in the following cases: - The goroutine lost its P, it ran some other code, and then got it back. It's now running with that P. - The goroutine lost its P and was unable to reacquire it, and is now running without a P. - The goroutine lost its P and acquired a different one, and is now running with that P.

func (tl traceLocker) GoSysExit(lostP bool)

GoUnpark method #

GoUnpark emits a GoUnblock event.

func (tl traceLocker) GoUnpark(gp *g, skip int)

Goexit function #

Goexit terminates the goroutine that calls it. No other goroutine is affected. Goexit runs all deferred calls before terminating the goroutine. Because Goexit is not a panic, any recover calls in those deferred functions will return nil. Calling Goexit from the main goroutine terminates that goroutine without func main returning. Since func main has not returned, the program continues execution of other goroutines. If all other goroutines exit, the program crashes. It crashes if called from a thread not created by the Go runtime.

func Goexit()

Gomaxprocs method #

Gomaxprocs emits a ProcsChange event.

func (tl traceLocker) Gomaxprocs(procs int32)

GoroutineProfile function #

GoroutineProfile returns n, the number of records in the active goroutine stack profile. If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true. If len(p) < n, GoroutineProfile does not change p and returns n, false. Most clients should use the [runtime/pprof] package instead of calling GoroutineProfile directly.

func GoroutineProfile(p []StackRecord) (n int, ok bool)

GoroutineStackAlloc method #

GoroutineStackAlloc records that a goroutine stack was newly allocated at address base with the provided size.

func (tl traceLocker) GoroutineStackAlloc(base uintptr, size uintptr)

GoroutineStackExists method #

GoroutineStackExists records that a goroutine stack already exists at address base with the provided size.

func (tl traceLocker) GoroutineStackExists(base uintptr, size uintptr)

GoroutineStackFree method #

GoroutineStackFree records that a goroutine stack at address base is about to be freed.

func (tl traceLocker) GoroutineStackFree(base uintptr)

Gosched function #

Gosched yields the processor, allowing other goroutines to run. It does not suspend the current goroutine, so execution resumes automatically. go:nosplit

func Gosched()

HeapAlloc method #

HeapAlloc emits a HeapAlloc event.

func (tl traceLocker) HeapAlloc(live uint64)

HeapGoal method #

HeapGoal reads the current heap goal and emits a HeapGoal event.

func (tl traceLocker) HeapGoal()

HeapObjectAlloc method #

HeapObjectAlloc records that an object was newly allocated at addr with the provided type. The type is optional, and the size of the slot occupied by the object is inferred from the span containing it.

func (tl traceLocker) HeapObjectAlloc(addr uintptr, typ *abi.Type)

HeapObjectExists method #

HeapObjectExists records that an object already exists at addr with the provided type. The type is optional, and the size of the slot occupied the object is inferred from the span containing it.

func (tl traceLocker) HeapObjectExists(addr uintptr, typ *abi.Type)

HeapObjectFree method #

HeapObjectFree records that an object at addr is about to be freed.

func (tl traceLocker) HeapObjectFree(addr uintptr)

InUseBytes method #

InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).

func (r *MemProfileRecord) InUseBytes() int64

InUseObjects method #

InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).

func (r *MemProfileRecord) InUseObjects() int64

IncNonDefault method #

func (g *godebugInc) IncNonDefault()

KeepAlive function #

KeepAlive marks its argument as currently reachable. This ensures that the object is not freed, and its finalizer is not run, before the point in the program where KeepAlive is called. A very simplified example showing where KeepAlive is required: type File struct { d int } d, err := syscall.Open("/file/path", syscall.O_RDONLY, 0) // ... do something if err != nil ... p := &File{d} runtime.SetFinalizer(p, func(p *File) { syscall.Close(p.d) }) var buf [10]byte n, err := syscall.Read(p.d, buf[:]) // Ensure p is not finalized until Read returns. runtime.KeepAlive(p) // No more uses of p after this point. Without the KeepAlive call, the finalizer could run at the start of [syscall.Read], closing the file descriptor before syscall.Read makes the actual system call. Note: KeepAlive should only be used to prevent finalizers from running prematurely. In particular, when used with [unsafe.Pointer], the rules for valid uses of unsafe.Pointer still apply.

func KeepAlive(x any)

Load method #

Load returns the address in the box as a virtual address. It also returns if the value was marked or not.

func (b *atomicOffAddr) Load() (uintptr, bool)

Load method #

func (p *goroutineProfileStateHolder) Load() goroutineProfileState

Load method #

Load returns the *mspan.

func (p *atomicMSpanPointer) Load() *mspan

Load method #

Loads the spanSetSpinePointer and returns it. It has the same semantics as atomic.UnsafePointer.

func (s *atomicSpanSetSpinePointer) Load() spanSetSpinePointer

LockOSThread function #

LockOSThread wires the calling goroutine to its current operating system thread. The calling goroutine will always execute in that thread, and no other goroutine will execute in it, until the calling goroutine has made as many calls to [UnlockOSThread] as to LockOSThread. If the calling goroutine exits without unlocking the thread, the thread will be terminated. All init functions are run on the startup thread. Calling LockOSThread from an init function will cause the main function to be invoked on that thread. A goroutine should call LockOSThread before calling OS services or non-Go library functions that depend on per-thread state. go:nosplit

func LockOSThread()

MSanRead function #

func MSanRead(addr unsafe.Pointer, len int)

MSanWrite function #

func MSanWrite(addr unsafe.Pointer, len int)

MemProfile function #

MemProfile returns a profile of memory allocated and freed per allocation site. MemProfile returns n, the number of records in the current memory profile. If len(p) >= n, MemProfile copies the profile into p and returns n, true. If len(p) < n, MemProfile does not change p and returns n, false. If inuseZero is true, the profile includes allocation records where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes. These are sites where memory was allocated, but it has all been released back to the runtime. The returned profile may be up to two garbage collection cycles old. This is to avoid skewing the profile toward allocations; because allocations happen in real time but frees are delayed until the garbage collector performs sweeping, the profile only accounts for allocations that have had a chance to be freed by the garbage collector. Most clients should use the runtime/pprof package or the testing package's -test.memprofile flag instead of calling MemProfile directly.

func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool)

MutexProfile function #

MutexProfile returns n, the number of records in the current mutex profile. If len(p) >= n, MutexProfile copies the profile into p and returns n, true. Otherwise, MutexProfile does not change p, and returns n, false. Most clients should use the [runtime/pprof] package instead of calling MutexProfile directly.

func MutexProfile(p []BlockProfileRecord) (n int, ok bool)

Name method #

Name returns the name of the function.

func (f *Func) Name() string

Next method #

Next returns a [Frame] representing the next call frame in the slice of PC values. If it has already returned all call frames, Next returns a zero [Frame]. The more result indicates whether the next call to Next will return a valid [Frame]. It does not necessarily indicate whether this call returned one. See the [Frames] example for idiomatic usage.

func (ci *Frames) Next() (frame Frame, more bool)

NumCPU function #

NumCPU returns the number of logical CPUs usable by the current process. The set of available CPUs is checked by querying the operating system at process startup. Changes to operating system CPU allocation after process startup are not reflected.

func NumCPU() int

NumCgoCall function #

NumCgoCall returns the number of cgo calls made by the current process.

func NumCgoCall() int64

NumGoroutine function #

NumGoroutine returns the number of goroutines that currently exist.

func NumGoroutine() int

Pin method #

Pin pins a Go object, preventing it from being moved or freed by the garbage collector until the [Pinner.Unpin] method has been called. A pointer to a pinned object can be directly stored in C memory or can be contained in Go memory passed to C functions. If the pinned object itself contains pointers to Go objects, these objects must be pinned separately if they are going to be accessed from C code. The argument must be a pointer of any type or an [unsafe.Pointer]. It's safe to call Pin on non-Go pointers, in which case Pin will do nothing.

func (p *Pinner) Pin(pointer any)

ProcStart method #

ProcStart traces a ProcStart event. Must be called with a valid P.

func (tl traceLocker) ProcStart()

ProcSteal method #

ProcSteal indicates that our current M stole a P from another M. inSyscall indicates that we're stealing the P from a syscall context. The caller must have ownership of pp.

func (tl traceLocker) ProcSteal(pp *p, inSyscall bool)

ProcStop method #

ProcStop traces a ProcStop event.

func (tl traceLocker) ProcStop(pp *p)

RaceAcquire function #

RaceAcquire/RaceRelease/RaceReleaseMerge establish happens-before relations between goroutines. These inform the race detector about actual synchronization that it can't see for some reason (e.g. synchronization within RaceDisable/RaceEnable sections of code). RaceAcquire establishes a happens-before relation with the preceding RaceReleaseMerge on addr up to and including the last RaceRelease on addr. In terms of the C memory model (C11 §5.1.2.4, §7.17.3), RaceAcquire is equivalent to atomic_load(memory_order_acquire). go:nosplit

func RaceAcquire(addr unsafe.Pointer)

RaceDisable function #

RaceDisable disables handling of race synchronization events in the current goroutine. Handling is re-enabled with RaceEnable. RaceDisable/RaceEnable can be nested. Non-synchronization events (memory accesses, function entry/exit) still affect the race detector. go:nosplit

func RaceDisable()

RaceEnable function #

RaceEnable re-enables handling of race events in the current goroutine. go:nosplit

func RaceEnable()

RaceErrors function #

func RaceErrors() int

RaceRead function #

func RaceRead(addr unsafe.Pointer)

RaceReadRange function #

func RaceReadRange(addr unsafe.Pointer, len int)

RaceRelease function #

RaceRelease performs a release operation on addr that can synchronize with a later RaceAcquire on addr. In terms of the C memory model, RaceRelease is equivalent to atomic_store(memory_order_release). go:nosplit

func RaceRelease(addr unsafe.Pointer)

RaceReleaseMerge function #

RaceReleaseMerge is like RaceRelease, but also establishes a happens-before relation with the preceding RaceRelease or RaceReleaseMerge on addr. In terms of the C memory model, RaceReleaseMerge is equivalent to atomic_exchange(memory_order_release). go:nosplit

func RaceReleaseMerge(addr unsafe.Pointer)

RaceWrite function #

func RaceWrite(addr unsafe.Pointer)

RaceWriteRange function #

func RaceWriteRange(addr unsafe.Pointer, len int)

ReadMemStats function #

ReadMemStats populates m with memory allocator statistics. The returned memory allocator statistics are up to date as of the call to ReadMemStats. This is in contrast with a heap profile, which is a snapshot as of the most recently completed garbage collection cycle.

func ReadMemStats(m *MemStats)

ReadTrace function #

ReadTrace returns the next chunk of binary tracing data, blocking until data is available. If tracing is turned off and all the data accumulated while it was on has been returned, ReadTrace returns nil. The caller must copy the returned data before calling ReadTrace again. ReadTrace must be called from one goroutine at a time.

func ReadTrace() []byte

RuntimeError method #

func (e boundsError) RuntimeError()

RuntimeError method #

func (*PanicNilError) RuntimeError()

RuntimeError method #

func (*TypeAssertionError) RuntimeError()

RuntimeError method #

func (e plainError) RuntimeError()

RuntimeError method #

func (e errorAddressString) RuntimeError()

RuntimeError method #

func (e errorString) RuntimeError()

STWDone method #

STWDone traces a STWEnd event.

func (tl traceLocker) STWDone()

STWStart method #

STWStart traces a STWBegin event.

func (tl traceLocker) STWStart(reason stwReason)

SetBlockProfileRate function #

SetBlockProfileRate controls the fraction of goroutine blocking events that are reported in the blocking profile. The profiler aims to sample an average of one blocking event per rate nanoseconds spent blocked. To include every blocking event in the profile, pass rate = 1. To turn off profiling entirely, pass rate <= 0.

func SetBlockProfileRate(rate int)

SetCPUProfileRate function #

SetCPUProfileRate sets the CPU profiling rate to hz samples per second. If hz <= 0, SetCPUProfileRate turns off profiling. If the profiler is on, the rate cannot be changed without first turning it off. Most clients should use the [runtime/pprof] package or the [testing] package's -test.cpuprofile flag instead of calling SetCPUProfileRate directly.

func SetCPUProfileRate(hz int)

SetCgoTraceback function #

SetCgoTraceback records three C functions to use to gather traceback information from C code and to convert that traceback information into symbolic information. These are used when printing stack traces for a program that uses cgo. The traceback and context functions may be called from a signal handler, and must therefore use only async-signal safe functions. The symbolizer function may be called while the program is crashing, and so must be cautious about using memory. None of the functions may call back into Go. The context function will be called with a single argument, a pointer to a struct: struct { Context uintptr } In C syntax, this struct will be struct { uintptr_t Context; }; If the Context field is 0, the context function is being called to record the current traceback context. It should record in the Context field whatever information is needed about the current point of execution to later produce a stack trace, probably the stack pointer and PC. In this case the context function will be called from C code. If the Context field is not 0, then it is a value returned by a previous call to the context function. This case is called when the context is no longer needed; that is, when the Go code is returning to its C code caller. This permits the context function to release any associated resources. While it would be correct for the context function to record a complete a stack trace whenever it is called, and simply copy that out in the traceback function, in a typical program the context function will be called many times without ever recording a traceback for that context. Recording a complete stack trace in a call to the context function is likely to be inefficient. 
The traceback function will be called with a single argument, a pointer to a struct: struct { Context uintptr SigContext uintptr Buf *uintptr Max uintptr } In C syntax, this struct will be struct { uintptr_t Context; uintptr_t SigContext; uintptr_t* Buf; uintptr_t Max; }; The Context field will be zero to gather a traceback from the current program execution point. In this case, the traceback function will be called from C code. Otherwise Context will be a value previously returned by a call to the context function. The traceback function should gather a stack trace from that saved point in the program execution. The traceback function may be called from an execution thread other than the one that recorded the context, but only when the context is known to be valid and unchanging. The traceback function may also be called deeper in the call stack on the same thread that recorded the context. The traceback function may be called multiple times with the same Context value; it will usually be appropriate to cache the result, if possible, the first time this is called for a specific context value. If the traceback function is called from a signal handler on a Unix system, SigContext will be the signal context argument passed to the signal handler (a C ucontext_t* cast to uintptr_t). This may be used to start tracing at the point where the signal occurred. If the traceback function is not called from a signal handler, SigContext will be zero. Buf is where the traceback information should be stored. It should be PC values, such that Buf[0] is the PC of the caller, Buf[1] is the PC of that function's caller, and so on. Max is the maximum number of entries to store. The function should store a zero to indicate the top of the stack, or that the caller is on a different stack, presumably a Go stack. Unlike runtime.Callers, the PC values returned should, when passed to the symbolizer function, return the file/line of the call instruction. 
No additional subtraction is required or appropriate. On all platforms, the traceback function is invoked when a call from Go to C to Go requests a stack trace. On linux/amd64, linux/ppc64le, linux/arm64, and freebsd/amd64, the traceback function is also invoked when a signal is received by a thread that is executing a cgo call. The traceback function should not make assumptions about when it is called, as future versions of Go may make additional calls. The symbolizer function will be called with a single argument, a pointer to a struct: struct { PC uintptr // program counter to fetch information for File *byte // file name (NUL terminated) Lineno uintptr // line number Func *byte // function name (NUL terminated) Entry uintptr // function entry point More uintptr // set non-zero if more info for this PC Data uintptr // unused by runtime, available for function } In C syntax, this struct will be struct { uintptr_t PC; char* File; uintptr_t Lineno; char* Func; uintptr_t Entry; uintptr_t More; uintptr_t Data; }; The PC field will be a value returned by a call to the traceback function. The first time the function is called for a particular traceback, all the fields except PC will be 0. The function should fill in the other fields if possible, setting them to 0/nil if the information is not available. The Data field may be used to store any useful information across calls. The More field should be set to non-zero if there is more information for this PC, zero otherwise. If More is set non-zero, the function will be called again with the same PC, and may return different information (this is intended for use with inlined functions). If More is zero, the function will be called with the next PC value in the traceback. When the traceback is complete, the function will be called once more with PC set to zero; this may be used to free any information. 
Each call will leave the fields of the struct set to the same values they had upon return, except for the PC field when the More field is zero. The function must not keep a copy of the struct pointer between calls. When calling SetCgoTraceback, the version argument is the version number of the structs that the functions expect to receive. Currently this must be zero. The symbolizer function may be nil, in which case the results of the traceback function will be displayed as numbers. If the traceback function is nil, the symbolizer function will never be called. The context function may be nil, in which case the traceback function will only be called with the context field set to zero. If the context function is nil, then calls from Go to C to Go will not show a traceback for the C portion of the call stack. SetCgoTraceback should be called only once, ideally from an init function.

func SetCgoTraceback(version int, traceback unsafe.Pointer, context unsafe.Pointer, symbolizer unsafe.Pointer)

SetFinalizer function #

SetFinalizer sets the finalizer associated with obj to the provided finalizer function. When the garbage collector finds an unreachable block with an associated finalizer, it clears the association and runs finalizer(obj) in a separate goroutine. This makes obj reachable again, but now without an associated finalizer. Assuming that SetFinalizer is not called again, the next time the garbage collector sees that obj is unreachable, it will free obj. SetFinalizer(obj, nil) clears any finalizer associated with obj. New Go code should consider using [AddCleanup] instead, which is much less error-prone than SetFinalizer. The argument obj must be a pointer to an object allocated by calling new, by taking the address of a composite literal, or by taking the address of a local variable. The argument finalizer must be a function that takes a single argument to which obj's type can be assigned, and can have arbitrary ignored return values. If either of these is not true, SetFinalizer may abort the program. Finalizers are run in dependency order: if A points at B, both have finalizers, and they are otherwise unreachable, only the finalizer for A runs; once A is freed, the finalizer for B can run. If a cyclic structure includes a block with a finalizer, that cycle is not guaranteed to be garbage collected and the finalizer is not guaranteed to run, because there is no ordering that respects the dependencies. The finalizer is scheduled to run at some arbitrary time after the program can no longer reach the object to which obj points. There is no guarantee that finalizers will run before a program exits, so typically they are useful only for releasing non-memory resources associated with an object during a long-running program. 
For example, an [os.File] object could use a finalizer to close the associated operating system file descriptor when a program discards an os.File without calling Close, but it would be a mistake to depend on a finalizer to flush an in-memory I/O buffer such as a [bufio.Writer], because the buffer would not be flushed at program exit. It is not guaranteed that a finalizer will run if the size of *obj is zero bytes, because it may share same address with other zero-size objects in memory. See https://go.dev/ref/spec#Size_and_alignment_guarantees. It is not guaranteed that a finalizer will run for objects allocated in initializers for package-level variables. Such objects may be linker-allocated, not heap-allocated. Note that because finalizers may execute arbitrarily far into the future after an object is no longer referenced, the runtime is allowed to perform a space-saving optimization that batches objects together in a single allocation slot. The finalizer for an unreferenced object in such an allocation may never run if it always exists in the same batch as a referenced object. Typically, this batching only happens for tiny (on the order of 16 bytes or less) and pointer-free objects. A finalizer may run as soon as an object becomes unreachable. In order to use finalizers correctly, the program must ensure that the object is reachable until it is no longer required. Objects stored in global variables, or that can be found by tracing pointers from a global variable, are reachable. A function argument or receiver may become unreachable at the last point where the function mentions it. To make an unreachable object reachable, pass the object to a call of the [KeepAlive] function to mark the last point in the function where the object must be reachable. 
For example, if p points to a struct, such as os.File, that contains a file descriptor d, and p has a finalizer that closes that file descriptor, and if the last use of p in a function is a call to syscall.Write(p.d, buf, size), then p may be unreachable as soon as the program enters [syscall.Write]. The finalizer may run at that moment, closing p.d, causing syscall.Write to fail because it is writing to a closed file descriptor (or, worse, to an entirely different file descriptor opened by a different goroutine). To avoid this problem, call KeepAlive(p) after the call to syscall.Write. A single goroutine runs all finalizers for a program, sequentially. If a finalizer must run for a long time, it should do so by starting a new goroutine. In the terminology of the Go memory model, a call SetFinalizer(x, f) “synchronizes before” the finalization call f(x). However, there is no guarantee that KeepAlive(x) or any other use of x “synchronizes before” f(x), so in general a finalizer should use a mutex or other synchronization mechanism if it needs to access mutable state in x. For example, consider a finalizer that inspects a mutable field in x that is modified from time to time in the main program before x becomes unreachable and the finalizer is invoked. The modifications in the main program and the inspection in the finalizer need to use appropriate synchronization, such as mutexes or atomic updates, to avoid read-write races.

func SetFinalizer(obj any, finalizer any)

SetMutexProfileFraction function #

SetMutexProfileFraction controls the fraction of mutex contention events that are reported in the mutex profile. On average 1/rate events are reported. The previous rate is returned. To turn off profiling entirely, pass rate 0. To just read the current rate, pass rate < 0. (For n>1 the details of sampling may change.)

func SetMutexProfileFraction(rate int) int

SpanAlloc method #

SpanAlloc records an event indicating that the span has just been allocated.

func (tl traceLocker) SpanAlloc(s *mspan)

SpanExists method #

SpanExists records an event indicating that the span exists.

func (tl traceLocker) SpanExists(s *mspan)

SpanFree method #

SpanFree records an event indicating that the span is about to be freed.

func (tl traceLocker) SpanFree(s *mspan)

Stack method #

Stack returns the stack trace associated with the record, a prefix of r.Stack0.

func (r *StackRecord) Stack() []uintptr

Stack method #

Stack returns the stack trace associated with the record, a prefix of r.Stack0.

func (r *MemProfileRecord) Stack() []uintptr

Stack function #

Stack formats a stack trace of the calling goroutine into buf and returns the number of bytes written to buf. If all is true, Stack formats stack traces of all other goroutines into buf after the trace for the current goroutine.

func Stack(buf []byte, all bool) int

StartTrace function #

StartTrace enables tracing for the current process. While tracing, the data will be buffered and available via [ReadTrace]. StartTrace returns an error if tracing is already enabled. Most clients should use the [runtime/trace] package or the [testing] package's -test.trace flag instead of calling StartTrace directly.

func StartTrace() error

Stop method #

Stop cancels the cleanup call. Stop will have no effect if the cleanup call has already been queued for execution (because ptr became unreachable). To guarantee that Stop removes the cleanup function, the caller must ensure that the pointer that was passed to AddCleanup is reachable across the call to Stop.

func (c Cleanup) Stop()

StopTrace function #

StopTrace stops tracing, if it was previously enabled. StopTrace only returns after all the reads for the trace have completed.

func StopTrace()

Store method #

func (p *goroutineProfileStateHolder) Store(value goroutineProfileState)

StoreMarked method #

StoreMarked stores addr, but first converts it to the offset address space and then negates it.

func (b *atomicOffAddr) StoreMarked(addr uintptr)

StoreMin method #

StoreMin stores addr if it's less than the current value in the offset address space, provided the current value is not marked.

func (b *atomicOffAddr) StoreMin(addr uintptr)

StoreNoWB method #

Stores the spanSetSpinePointer. It has the same semantics as [atomic.UnsafePointer].

func (s *atomicSpanSetSpinePointer) StoreNoWB(p spanSetSpinePointer)

StoreNoWB method #

StoreNoWB stores an *mspan.

func (p *atomicMSpanPointer) StoreNoWB(s *mspan)

StoreUnmark method #

StoreUnmark attempts to unmark the value in atomicOffAddr and replace it with newAddr. markedAddr must be a marked address returned by Load. This function will not store newAddr if the box no longer contains markedAddr.

func (b *atomicOffAddr) StoreUnmark(markedAddr uintptr, newAddr uintptr)

String method #

func (w waitReason) String() string

String method #

func (rank lockRank) String() string

String method #

func (r stwReason) String() string

ThreadCreateProfile function #

ThreadCreateProfile returns n, the number of records in the thread creation profile. If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true. If len(p) < n, ThreadCreateProfile does not change p and returns n, false. Most clients should use the runtime/pprof package instead of calling ThreadCreateProfile directly.

func ThreadCreateProfile(p []StackRecord) (n int, ok bool)

UnlockOSThread function #

UnlockOSThread undoes an earlier call to LockOSThread. If this drops the number of active LockOSThread calls on the calling goroutine to zero, it unwires the calling goroutine from its fixed operating system thread. If there are no active LockOSThread calls, this is a no-op. Before calling UnlockOSThread, the caller must ensure that the OS thread is suitable for running other goroutines. If the caller made any permanent changes to the state of the thread that would affect other goroutines, it should not call this function and thus leave the goroutine locked to the OS thread until the goroutine (and hence the thread) exits. go:nosplit

func UnlockOSThread()

Unpin method #

Unpin unpins all pinned objects of the [Pinner].

func (p *Pinner) Unpin()

Version function #

Version returns the Go tree's version string. It is either the commit hash and date at the time of the build or, when possible, a release tag like "go1.3".

func Version() string

_ELF_ST_BIND function #

How to extract and insert information held in the st_info field.

func _ELF_ST_BIND(val byte) byte

_ELF_ST_TYPE function #

func _ELF_ST_TYPE(val byte) byte

_ExternalCode function #

func _ExternalCode()

_Func method #

func (f funcInfo) _Func() *Func

_GC function #

func _GC()

_LostContendedRuntimeLock function #

func _LostContendedRuntimeLock()

_LostExternalCode function #

func _LostExternalCode()

_LostSIGPROFDuringAtomic64 function #

func _LostSIGPROFDuringAtomic64()

_System function #

func _System()

_VDSO function #

func _VDSO()

_atoi function #

func _atoi(b []byte) int

_cgo_panic_internal function #

func _cgo_panic_internal(p *byte)

_d2v function #

func _d2v(y *uint64, d float64)

_div function #

func _div()

_div64by32 function #

go:noescape

func _div64by32(a uint64, b uint32, r *uint32) (q uint32)

_divu function #

func _divu()

_initcgo function #

func _initcgo()

_mod function #

func _mod()

_modu function #

func _modu()

_mul64by32 function #

go:noescape

func _mul64by32(lo64 *uint64, a uint64, b uint32) (hi32 uint32)

a0 method #

func (c *sigctxt) a0() uint64

a0 method #

func (c *sigctxt) a0() uint64

a0 method #

func (c *sigctxt) a0() uint64

a1 method #

func (c *sigctxt) a1() uint64

a1 method #

func (c *sigctxt) a1() uint64

a1 method #

func (c *sigctxt) a1() uint64

a2 method #

func (c *sigctxt) a2() uint64

a2 method #

func (c *sigctxt) a2() uint64

a2 method #

func (c *sigctxt) a2() uint64

a3 method #

func (c *sigctxt) a3() uint64

a3 method #

func (c *sigctxt) a3() uint64

a3 method #

func (c *sigctxt) a3() uint64

a4 method #

func (c *sigctxt) a4() uint64

a4 method #

func (c *sigctxt) a4() uint64

a4 method #

func (c *sigctxt) a4() uint64

a5 method #

func (c *sigctxt) a5() uint64

a5 method #

func (c *sigctxt) a5() uint64

a5 method #

func (c *sigctxt) a5() uint64

a6 method #

func (c *sigctxt) a6() uint64

a6 method #

func (c *sigctxt) a6() uint64

a6 method #

func (c *sigctxt) a6() uint64

a7 method #

func (c *sigctxt) a7() uint64

a7 method #

func (c *sigctxt) a7() uint64

a7 method #

func (c *sigctxt) a7() uint64

abigen_sync_atomic_AddInt32 function #

go:linkname abigen_sync_atomic_AddInt32 sync/atomic.AddInt32

func abigen_sync_atomic_AddInt32(addr *int32, delta int32) (new int32)

abigen_sync_atomic_AddInt64 function #

go:linkname abigen_sync_atomic_AddInt64 sync/atomic.AddInt64

func abigen_sync_atomic_AddInt64(addr *int64, delta int64) (new int64)

abigen_sync_atomic_AddUint32 function #

go:linkname abigen_sync_atomic_AddUint32 sync/atomic.AddUint32

func abigen_sync_atomic_AddUint32(addr *uint32, delta uint32) (new uint32)

abigen_sync_atomic_AddUint64 function #

go:linkname abigen_sync_atomic_AddUint64 sync/atomic.AddUint64

func abigen_sync_atomic_AddUint64(addr *uint64, delta uint64) (new uint64)

abigen_sync_atomic_AddUintptr function #

go:linkname abigen_sync_atomic_AddUintptr sync/atomic.AddUintptr

func abigen_sync_atomic_AddUintptr(addr *uintptr, delta uintptr) (new uintptr)

abigen_sync_atomic_AndInt32 function #

go:linkname abigen_sync_atomic_AndInt32 sync/atomic.AndInt32

func abigen_sync_atomic_AndInt32(addr *int32, mask int32) (old int32)

abigen_sync_atomic_AndInt64 function #

go:linkname abigen_sync_atomic_AndInt64 sync/atomic.AndInt64

func abigen_sync_atomic_AndInt64(addr *int64, mask int64) (old int64)

abigen_sync_atomic_AndUint32 function #

go:linkname abigen_sync_atomic_AndUint32 sync/atomic.AndUint32

func abigen_sync_atomic_AndUint32(addr *uint32, mask uint32) (old uint32)

abigen_sync_atomic_AndUint64 function #

go:linkname abigen_sync_atomic_AndUint64 sync/atomic.AndUint64

func abigen_sync_atomic_AndUint64(addr *uint64, mask uint64) (old uint64)

abigen_sync_atomic_AndUintptr function #

go:linkname abigen_sync_atomic_AndUintptr sync/atomic.AndUintptr

func abigen_sync_atomic_AndUintptr(addr *uintptr, mask uintptr) (old uintptr)

abigen_sync_atomic_CompareAndSwapInt32 function #

go:linkname abigen_sync_atomic_CompareAndSwapInt32 sync/atomic.CompareAndSwapInt32

func abigen_sync_atomic_CompareAndSwapInt32(addr *int32, old int32, new int32) (swapped bool)

abigen_sync_atomic_CompareAndSwapInt64 function #

go:linkname abigen_sync_atomic_CompareAndSwapInt64 sync/atomic.CompareAndSwapInt64

func abigen_sync_atomic_CompareAndSwapInt64(addr *int64, old int64, new int64) (swapped bool)

abigen_sync_atomic_CompareAndSwapUint32 function #

go:linkname abigen_sync_atomic_CompareAndSwapUint32 sync/atomic.CompareAndSwapUint32

func abigen_sync_atomic_CompareAndSwapUint32(addr *uint32, old uint32, new uint32) (swapped bool)

abigen_sync_atomic_CompareAndSwapUint64 function #

go:linkname abigen_sync_atomic_CompareAndSwapUint64 sync/atomic.CompareAndSwapUint64

func abigen_sync_atomic_CompareAndSwapUint64(addr *uint64, old uint64, new uint64) (swapped bool)

abigen_sync_atomic_LoadInt32 function #

go:linkname abigen_sync_atomic_LoadInt32 sync/atomic.LoadInt32

func abigen_sync_atomic_LoadInt32(addr *int32) (val int32)

abigen_sync_atomic_LoadInt64 function #

go:linkname abigen_sync_atomic_LoadInt64 sync/atomic.LoadInt64

func abigen_sync_atomic_LoadInt64(addr *int64) (val int64)

abigen_sync_atomic_LoadPointer function #

go:linkname abigen_sync_atomic_LoadPointer sync/atomic.LoadPointer

func abigen_sync_atomic_LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)

abigen_sync_atomic_LoadUint32 function #

go:linkname abigen_sync_atomic_LoadUint32 sync/atomic.LoadUint32

func abigen_sync_atomic_LoadUint32(addr *uint32) (val uint32)

abigen_sync_atomic_LoadUint64 function #

go:linkname abigen_sync_atomic_LoadUint64 sync/atomic.LoadUint64

func abigen_sync_atomic_LoadUint64(addr *uint64) (val uint64)

abigen_sync_atomic_LoadUintptr function #

go:linkname abigen_sync_atomic_LoadUintptr sync/atomic.LoadUintptr

func abigen_sync_atomic_LoadUintptr(addr *uintptr) (val uintptr)

abigen_sync_atomic_OrInt32 function #

go:linkname abigen_sync_atomic_OrInt32 sync/atomic.OrInt32

func abigen_sync_atomic_OrInt32(addr *int32, mask int32) (old int32)

abigen_sync_atomic_OrInt64 function #

go:linkname abigen_sync_atomic_OrInt64 sync/atomic.OrInt64

func abigen_sync_atomic_OrInt64(addr *int64, mask int64) (old int64)

abigen_sync_atomic_OrUint32 function #

go:linkname abigen_sync_atomic_OrUint32 sync/atomic.OrUint32

func abigen_sync_atomic_OrUint32(addr *uint32, mask uint32) (old uint32)

abigen_sync_atomic_OrUint64 function #

go:linkname abigen_sync_atomic_OrUint64 sync/atomic.OrUint64

func abigen_sync_atomic_OrUint64(addr *uint64, mask uint64) (old uint64)

abigen_sync_atomic_OrUintptr function #

go:linkname abigen_sync_atomic_OrUintptr sync/atomic.OrUintptr

func abigen_sync_atomic_OrUintptr(addr *uintptr, mask uintptr) (old uintptr)

abigen_sync_atomic_StoreInt32 function #

go:linkname abigen_sync_atomic_StoreInt32 sync/atomic.StoreInt32

func abigen_sync_atomic_StoreInt32(addr *int32, val int32)

abigen_sync_atomic_StoreInt64 function #

go:linkname abigen_sync_atomic_StoreInt64 sync/atomic.StoreInt64

func abigen_sync_atomic_StoreInt64(addr *int64, val int64)

abigen_sync_atomic_StoreUint32 function #

go:linkname abigen_sync_atomic_StoreUint32 sync/atomic.StoreUint32

func abigen_sync_atomic_StoreUint32(addr *uint32, val uint32)

abigen_sync_atomic_StoreUint64 function #

go:linkname abigen_sync_atomic_StoreUint64 sync/atomic.StoreUint64

func abigen_sync_atomic_StoreUint64(addr *uint64, val uint64)

abigen_sync_atomic_SwapInt32 function #

go:linkname abigen_sync_atomic_SwapInt32 sync/atomic.SwapInt32

func abigen_sync_atomic_SwapInt32(addr *int32, new int32) (old int32)

abigen_sync_atomic_SwapInt64 function #

go:linkname abigen_sync_atomic_SwapInt64 sync/atomic.SwapInt64

func abigen_sync_atomic_SwapInt64(addr *int64, new int64) (old int64)

abigen_sync_atomic_SwapUint32 function #

go:linkname abigen_sync_atomic_SwapUint32 sync/atomic.SwapUint32

func abigen_sync_atomic_SwapUint32(addr *uint32, new uint32) (old uint32)

abigen_sync_atomic_SwapUint64 function #

go:linkname abigen_sync_atomic_SwapUint64 sync/atomic.SwapUint64

func abigen_sync_atomic_SwapUint64(addr *uint64, new uint64) (old uint64)

abort function #

abort crashes the runtime in situations where even throw might not work. In general it should do something a debugger will recognize (e.g., an INT3 on x86). A crash in abort is recognized by the signal handler, which will attempt to tear down the runtime immediately.

func abort()

abs function #

abs returns the absolute value of x. Special cases are: abs(±Inf) = +Inf abs(NaN) = NaN

func abs(x float64) float64

access function #

Called from write_err_android.go only, but defined in sys_linux_*.s; declared here (instead of in write_err_android.go) for go vet on non-android builds. The return value is the raw syscall result, which may encode an error number. go:noescape

func access(name *byte, mode int32) int32

accumulate method #

accumulate takes a cpuStats and adds in the current state of all GC CPU counters. gcMarkPhase indicates that we're in the mark phase and that certain counter values should be used.

func (s *cpuStats) accumulate(now int64, gcMarkPhase bool)

accumulate method #

accumulate adds time to the bucket and signals whether the limiter is enabled. This is an internal function that deals just with the bucket. Prefer update. l.lock must be held.

func (l *gcCPULimiterState) accumulate(mutatorTime int64, gcTime int64)

accumulateGCPauseTime method #

accumulateGCPauseTime adds dt*stwProcs to the GC CPU pause time stats. dt should be the actual time spent paused, for orthogonality. maxProcs should be GOMAXPROCS, not work.stwprocs, since this number must be comparable to a total time computed from GOMAXPROCS.

func (s *cpuStats) accumulateGCPauseTime(dt int64, maxProcs int32)

acquire method #

acquire returns a heapStatsDelta to be updated. In effect, it acquires the shard for writing. release must be called as soon as the relevant deltas are updated. The returned heapStatsDelta must be updated atomically. The caller's P must not change between acquire and release. This also means that the caller should not acquire a P or release its P in between. A P also must not acquire a given consistentHeapStats if it hasn't yet released it. nosplit because a stack growth in this function could lead to a stack allocation that could reenter the function. go:nosplit

func (m *consistentHeapStats) acquire() *heapStatsDelta

acquireLockRankAndM function #

This function may be called in nosplit context and thus must be nosplit. go:nosplit

func acquireLockRankAndM(rank lockRank)

acquireLockRankAndM function #

acquireLockRankAndM acquires a rank which is not associated with a mutex lock. To maintain the invariant that an M with m.locks==0 does not hold any lock-like resources, it also acquires the M. This function may be called in nosplit context and thus must be nosplit. go:nosplit

func acquireLockRankAndM(rank lockRank)

acquireStatus method #

acquireStatus acquires the right to emit a Status event for the scheduling resource. nosplit because it's part of writing an event for an M, which must not have any stack growth. go:nosplit

func (r *traceSchedResourceState) acquireStatus(gen uintptr) bool

acquireSudog function #

go:nosplit

func acquireSudog() *sudog

acquirem function #

go:nosplit

func acquirem() *m

acquirep function #

Associate p and the current m. This function is allowed to have write barriers even if the caller isn't because it immediately acquires pp. go:yeswritebarrierrec

func acquirep(pp *p)

activeModules function #

activeModules returns a slice of active modules. A module is active once its gcdatamask and gcbssmask have been assembled and it is usable by the GC. This is nosplit/nowritebarrier because it is called by the cgo pointer checking code. go:nosplit go:nowritebarrier

func activeModules() []*moduledata

add method #

add adds the stack trace to the profile. It is called from signal handlers and other limited environments and cannot allocate memory or acquire locks that might be held at the time of the signal, nor can it use substantial amounts of stack. go:nowritebarrierrec

func (p *cpuProfile) add(tagPtr *unsafe.Pointer, stk []uintptr)

add method #

func (p *notInHeap) add(bytes uintptr) *notInHeap

add function #

Should be a built-in for unsafe.Pointer? add should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - fortio.org/log Do not remove or change the type signature. See go.dev/issue/67401. go:linkname add go:nosplit

func add(p unsafe.Pointer, x uintptr) unsafe.Pointer

add method #

add atomically adds the sysMemStat by n. Must be nosplit as it is called in runtime initialization, e.g. newosproc0. go:nosplit

func (s *sysMemStat) add(n int64)

add method #

add adds a uintptr offset to the offAddr.

func (l offAddr) add(bytes uintptr) offAddr

add method #

add inserts a new address range to a. r must not overlap with any address range in a and r.size() must be > 0.

func (a *addrRanges) add(r addrRange)

add method #

add accumulates b into a. It does not zero b.

func (a *memRecordCycle) add(b *memRecordCycle)

add method #

add adds the given itab to itab table t. itabLock must be held.

func (t *itabTableType) add(m *itab)

add1 function #

add1 returns the byte pointer p+1. go:nowritebarrier go:nosplit

func add1(p *byte) *byte

addAssistTime method #

addAssistTime notifies the limiter of additional assist time. It will be included in the next update.

func (l *gcCPULimiterState) addAssistTime(t int64)

addCleanup function #

addCleanup attaches a cleanup function to the object. Multiple cleanups are allowed on an object, and even the same pointer. A cleanup id is returned which can be used to uniquely identify the cleanup.

func addCleanup(p unsafe.Pointer, f *funcval) uint64

addCountsAndClearFlags method #

addCountsAndClearFlags returns the packed form of "x + (data, tag) - all flags".

func (x profIndex) addCountsAndClearFlags(data int, tag int) profIndex

addCovMeta function #

The compiler emits calls to runtime.addCovMeta but this code has moved to rtcov.AddMeta.

func addCovMeta(p unsafe.Pointer, dlen uint32, hash [16]byte, pkgpath string, pkgid int, cmode uint8, cgran uint8) uint32

addExtra method #

addExtra adds the "extra" profiling events, queued by addNonGo, to the profile log. addExtra is called either from a signal handler on a Go thread or from an ordinary goroutine; either way it can use stack and has a g. The world may be stopped, though.

func (p *cpuProfile) addExtra()

addExtraM function #

Adds a newly allocated M to the extra M list. go:nosplit

func addExtraM(mp *m)

addGlobals method #

func (c *gcControllerState) addGlobals(amount int64)

addHeap method #

addHeap adds t to the timers heap. The caller must hold ts.lock or the world must be stopped. The caller must also have checked that t belongs in the heap. Callers that are not sure can call t.maybeAdd instead, but note that maybeAdd has different locking requirements.

func (ts *timers) addHeap(t *timer)

addIdleMarkWorker method #

addIdleMarkWorker attempts to add a new idle mark worker. If this returns true, the caller must become an idle mark worker unless there's no background mark worker goroutines in the pool. This case is harmless because there are already background mark workers running. If this returns false, the caller must NOT become an idle mark worker. nosplit because it may be called without a P. go:nosplit

func (c *gcControllerState) addIdleMarkWorker() bool

addIdleTime method #

addIdleTime notifies the limiter of additional time a P spent on the idle list. It will be subtracted from the total CPU time in the next update.

func (l *gcCPULimiterState) addIdleTime(t int64)

addNonGo method #

addNonGo adds the non-Go stack trace to the profile. It is called from a non-Go thread, so we cannot use much stack at all, nor do anything that needs a g or an m. In particular, we can't call cpuprof.log.write. Instead, we copy the stack into cpuprof.extra, which will be drained the next time a Go thread gets the signal handling event. go:nosplit go:nowritebarrierrec

func (p *cpuProfile) addNonGo(stk []uintptr)

addObject method #

addObject adds a stack object at addr of type typ to the set of stack objects.

func (s *stackScanState) addObject(addr uintptr, r *stackObjectRecord)

addScannableStack method #

func (c *gcControllerState) addScannableStack(pp *p, amount int64)

addWakeupEvent function #

func addWakeupEvent(kq int32)

addWakeupEvent function #

func addWakeupEvent(kq int32)

addb function #

addb returns the byte pointer p+n. go:nowritebarrier go:nosplit

func addb(p *byte, n uintptr) *byte

addfinalizer function #

Adds a finalizer to the object p. Returns true if it succeeded.

func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool

addmoduledata function #

Called from linker-generated .initarray; declared for go vet; do NOT call from Go.

func addmoduledata()

addr method #

addr returns the virtual address for this offset address.

func (l offAddr) addr() uintptr

addrsToSummaryRange function #

addrsToSummaryRange converts base and limit pointers into a range of entries for the given summary level. The returned range is inclusive on the lower bound and exclusive on the upper bound.

func addrsToSummaryRange(level int, base uintptr, limit uintptr) (lo int, hi int)

addspecial function #

addspecial adds the special record s to the list of special records for the object p. All fields of s should be filled in except for offset & next, which this routine will fill in. Returns true if the special was successfully added, false otherwise. (The add will fail only if a record with the same p and s->kind already exists unless force is set to true.)

func addspecial(p unsafe.Pointer, s *special, force bool) bool

adjust method #

adjust looks through the timers in ts.heap for any timers that have been modified to run earlier, and puts them in the correct place in the heap. While looking for those timers, it also moves timers that have been modified to run later, and removes deleted timers. The caller must have locked ts.

func (ts *timers) adjust(now int64, force bool)

adjustSignalStack function #

adjustSignalStack adjusts the current stack guard based on the stack pointer that is actually in use while handling a signal. We do this in case some non-Go code called sigaltstack. This reports whether the stack was adjusted, and if so stores the old signal stack in *gsigstack. go:nosplit

func adjustSignalStack(sig uint32, mp *m, gsigStack *gsignalStack) bool

adjustSignalStack2 function #

go:nosplit

func adjustSignalStack2(sig uint32, sp uintptr, mp *m, ssDisable bool)

adjustctxt function #

func adjustctxt(gp *g, adjinfo *adjustinfo)

adjustdefers function #

func adjustdefers(gp *g, adjinfo *adjustinfo)

adjustframe function #

Note: the argument/return area is adjusted by the callee.

func adjustframe(frame *stkframe, adjinfo *adjustinfo)

adjustpanics function #

func adjustpanics(gp *g, adjinfo *adjustinfo)

adjustpointer function #

adjustpointer checks whether *vpp is in the old stack described by adjinfo. If so, it rewrites *vpp to point into the new stack.

func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer)

adjustpointers function #

bv describes the memory starting at address scanp. Adjust any pointers contained therein.

func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo)

adjustsudogs function #

func adjustsudogs(gp *g, adjinfo *adjustinfo)

advance method #

advance advances the markBits to the next object in the span.

func (m *markBits) advance()

advanceEvacuationMark function #

func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr)

alginit function #

func alginit()

alignDown function #

alignDown rounds n down to a multiple of a. a must be a power of 2. go:nosplit

func alignDown(n uintptr, a uintptr) uintptr

alignUp function #

alignUp rounds n up to a multiple of a. a must be a power of 2. go:nosplit

func alignUp(n uintptr, a uintptr) uintptr

allGsSnapshot function #

allGsSnapshot returns a snapshot of the slice of all Gs. The world must be stopped or allglock must be held.

func allGsSnapshot() []*g

allZero function #

func allZero(b []byte) bool

allgadd function #

func allgadd(gp *g)

alloc method #

alloc tries to grab a spanSetBlock out of the pool, and if it fails persistentallocs a new one and returns it.

func (p *spanSetBlockAlloc) alloc() *spanSetBlock

alloc method #

alloc allocates npages worth of memory from the page heap, returning the base address for the allocation and the amount of scavenged memory in bytes contained in the region [base address, base address + npages*pageSize). Returns a 0 base address on failure, in which case other returned values should be ignored. p.mheapLock must be held. Must run on the system stack because p.mheapLock must be held. go:systemstack

func (p *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr)

alloc method #

alloc allocates npages from the page cache and is the main entry point for allocation. Returns a base address and the amount of scavenged memory in the allocated region in bytes. Returns a base address of zero on failure, in which case the amount of scavenged memory should be ignored.

func (c *pageCache) alloc(npages uintptr) (uintptr, uintptr)

alloc method #

alloc updates metadata for chunk at index ci with the fact that an allocation of npages occurred. It also eagerly attempts to collapse the chunk's memory into hugepage if the chunk has become sufficiently dense and we're not allocating the whole chunk at once (which suggests the allocation is part of a bigger one and it's probably not worth eagerly collapsing). alloc may only run concurrently with find.

func (s *scavengeIndex) alloc(ci chunkIdx, npages uint)

alloc method #

alloc updates sc given that npages were allocated in the corresponding chunk.

func (sc *scavChunkData) alloc(npages uint, newGen uint32)

alloc method #

alloc allocates a new span of npage pages from the GC'd heap. spanclass indicates the span's size class and scannability. Returns a span that has been fully initialized. span.needzero indicates whether the span has been zeroed. Note that it may not be.

func (h *mheap) alloc(npages uintptr, spanclass spanClass) *mspan

alloc method #

func (f *fixalloc) alloc() unsafe.Pointer

alloc method #

alloc allocates n-byte block. The block is always aligned to 8 bytes, regardless of platform.

func (a *traceRegionAlloc) alloc(n uintptr) *notInHeap

alloc method #

func (c *pollCache) alloc() *pollDesc

alloc method #

func (l *linearAlloc) alloc(size uintptr, align uintptr, sysStat *sysMemStat) unsafe.Pointer

alloc method #

alloc reserves space in the current chunk or calls refill and reserves space in a new chunk. If cap is negative, the type will be taken literally, otherwise it will be considered as an element type for a slice backing store with capacity cap.

func (a *userArena) alloc(typ *_type, cap int) unsafe.Pointer

allocAll method #

allocAll sets every bit in the bitmap to 1 and updates the scavenged bits appropriately.

func (m *pallocData) allocAll()

allocAll method #

allocAll allocates all the bits of b.

func (b *pallocBits) allocAll()

allocBitsForIndex method #

go:nosplit

func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits

allocLarge method #

allocLarge allocates a span for a large object.

func (c *mcache) allocLarge(size uintptr, noscan bool) *mspan

allocMSpanLocked method #

allocMSpanLocked allocates an mspan object. h.lock must be held. allocMSpanLocked must be called on the system stack because its caller holds the heap lock. See mheap for details. Running on the system stack also ensures that we won't switch Ps during this function. See tryAllocMSpan for details. go:systemstack

func (h *mheap) allocMSpanLocked() *mspan

allocManual method #

allocManual allocates a manually-managed span of npage pages. allocManual returns nil if allocation fails. allocManual adds the bytes used to *stat, which should be a memstats in-use field. Unlike allocations in the GC'd heap, the allocation does *not* count toward heapInUse. The memory backing the returned span may not be zeroed if span.needzero is set. allocManual must be called on the system stack because it may acquire the heap lock via allocSpan. See mheap for details. If new code is written to call allocManual, do NOT use an existing spanAllocType value and instead declare a new one. go:systemstack

func (h *mheap) allocManual(npages uintptr, typ spanAllocType) *mspan

allocN method #

allocN is a helper which attempts to allocate npages worth of pages from the cache. It represents the general case for allocating from the page cache. Returns a base address and the amount of scavenged memory in the allocated region in bytes.

func (c *pageCache) allocN(npages uintptr) (uintptr, uintptr)

allocNeedsZero method #

allocNeedsZero checks if the region of address space [base, base+npage*pageSize), assumed to be allocated, needs to be zeroed, updating heap arena metadata for future allocations. This must be called each time pages are allocated from the heap, even if the page allocator can otherwise prove the memory it's allocating is already zero because they're fresh from the operating system. It updates heapArena metadata that is critical for future page allocations. There are no locking constraints on this method.

func (h *mheap) allocNeedsZero(base uintptr, npage uintptr) (needZero bool)

allocPages64 method #

allocPages64 allocates a 64-bit block of 64 pages aligned to 64 pages according to the bits set in alloc. The block set is the one containing the i'th page.

func (b *pallocBits) allocPages64(i uint, alloc uint64)

allocRange method #

allocRange allocates the range [i, i+n).

func (b *pallocBits) allocRange(i uint, n uint)

allocRange method #

allocRange marks the range of memory [base, base+npages*pageSize) as allocated. It also updates the summaries to reflect the newly-updated bitmap. Returns the amount of scavenged memory in bytes present in the allocated range. p.mheapLock must be held.

func (p *pageAlloc) allocRange(base uintptr, npages uintptr) uintptr

allocRange method #

allocRange sets bits [i, i+n) in the bitmap to 1 and updates the scavenged bits appropriately.

func (m *pallocData) allocRange(i uint, n uint)

allocSpan method #

allocSpan allocates an mspan which owns npages worth of memory. If typ.manual() == false, allocSpan allocates a heap span of class spanclass and updates heap accounting. If manual == true, allocSpan allocates a manually-managed span (spanclass is ignored), and the caller is responsible for any accounting related to its use of the span. Either way, allocSpan will atomically add the bytes in the newly allocated span to *sysStat. The returned span is fully initialized. h.lock must not be held. allocSpan must be called on the system stack both because it acquires the heap lock and because it must block GC transitions. go:systemstack

func (h *mheap) allocSpan(npages uintptr, typ spanAllocType, spanclass spanClass) (s *mspan)

allocToCache method #

allocToCache acquires a pageCachePages-aligned chunk of free pages which may not be contiguous, and returns a pageCache structure which owns the chunk. p.mheapLock must be held. Must run on the system stack because p.mheapLock must be held. go:systemstack

func (p *pageAlloc) allocToCache() pageCache

allocUserArenaChunk method #

allocUserArenaChunk attempts to reuse a free user arena chunk represented as a span. Must be in a non-preemptible state to ensure the consistency of statistics exported to MemStats. Acquires the heap lock. Must run on the system stack for that reason. go:systemstack

func (h *mheap) allocUserArenaChunk() *mspan

allocm function #

Allocate a new m unassociated with any thread. Can use p for allocation context if needed. fn is recorded as the new m's m.mstartfn. id is optional pre-allocated m ID. Omit by passing -1. This function is allowed to have write barriers even if the caller isn't because it borrows pp. go:yeswritebarrierrec

func allocm(pp *p, fn func(), id int64) *m

allocmcache function #

func allocmcache() *mcache

appendIntStr function #

func appendIntStr(b []byte, v int64, signed bool) []byte

arc4random_buf function #

go:nosplit go:cgo_unsafe_args

func arc4random_buf(p unsafe.Pointer, n int32)

arc4random_buf_trampoline function #

func arc4random_buf_trampoline()

archauxv function #

func archauxv(tag uintptr, val uintptr)

archauxv function #

func archauxv(tag uintptr, val uintptr)

archauxv function #

func archauxv(tag uintptr, val uintptr)

archauxv function #

func archauxv(tag uintptr, val uintptr)

archauxv function #

func archauxv(tag uintptr, val uintptr)

archauxv function #

func archauxv(tag uintptr, val uintptr)

archauxv function #

func archauxv(tag uintptr, val uintptr)

archauxv function #

func archauxv(tag uintptr, val uintptr)

archauxv function #

func archauxv(tag uintptr, val uintptr)

archauxv function #

func archauxv(tag uintptr, val uintptr)

arenaBase function #

arenaBase returns the low address of the region covered by heap arena i.

func arenaBase(i arenaIdx) uintptr

arenaIndex function #

arenaIndex returns the index into mheap_.arenas of the arena containing metadata for p. This index combines an index into the L1 map and an index into the L2 map and should be used as mheap_.arenas[ai.l1()][ai.l2()]. If p is outside the range of valid heap addresses, either l1() or l2() will be out of bounds. It is nosplit because it's called by spanOf and several other nosplit functions. go:nosplit

func arenaIndex(p uintptr) arenaIdx

arena_arena_Free function #

arena_arena_Free is a wrapper around (*userArena).free. go:linkname arena_arena_Free arena.runtime_arena_arena_Free

func arena_arena_Free(arena unsafe.Pointer)

arena_arena_New function #

arena_arena_New is a wrapper around (*userArena).new, except that typ is an any (must be a *_type, still) and typ must be a type descriptor for a pointer to the type to actually be allocated, i.e. pass a *T to allocate a T. This is necessary because this function returns a *T. go:linkname arena_arena_New arena.runtime_arena_arena_New

func arena_arena_New(arena unsafe.Pointer, typ any) any

arena_arena_Slice function #

arena_arena_Slice is a wrapper around (*userArena).slice. go:linkname arena_arena_Slice arena.runtime_arena_arena_Slice

func arena_arena_Slice(arena unsafe.Pointer, slice any, cap int)

arena_heapify function #

arena_heapify takes a value that lives in an arena and makes a copy of it on the heap. Values that don't live in an arena are returned unmodified. go:linkname arena_heapify arena.runtime_arena_heapify

func arena_heapify(s any) any

arena_newArena function #

arena_newArena is a wrapper around newUserArena. go:linkname arena_newArena arena.runtime_arena_newArena

func arena_newArena() unsafe.Pointer

argBytes method #

argBytes returns the argument frame size for a call to frame.fn.

func (frame *stkframe) argBytes() uintptr

argMapInternal method #

argMapInternal is used internally by stkframe to fetch special argument maps. argMap.n is always populated with the size of the argument map. argMap.bytedata is only populated for dynamic argument maps (used by reflect). If the caller requires the argument map, it should use this if non-nil, and otherwise fetch the argument map using the current PC. hasReflectStackObj indicates that this frame also has a reflect function stack object, which the caller must synthesize.

func (frame *stkframe) argMapInternal() (argMap bitvector, hasReflectStackObj bool)

args function #

func args(c int32, v **byte)

args_get function #

go:wasmimport wasi_snapshot_preview1 args_get go:noescape

func args_get(argv *uintptr32, argvBuf *byte) errno

args_sizes_get function #

go:wasmimport wasi_snapshot_preview1 args_sizes_get go:noescape

func args_sizes_get(argc *size, argvBufLen *size) errno

argv_index function #

nosplit for use in linux startup sysargs. go:nosplit

func argv_index(argv **byte, i int32) *byte

asanpoison function #

func asanpoison(addr unsafe.Pointer, sz uintptr)

asanpoison function #

go:noescape

func asanpoison(addr unsafe.Pointer, sz uintptr)

asanread function #

go:linkname asanread go:nosplit

func asanread(addr unsafe.Pointer, sz uintptr)

asanread function #

func asanread(addr unsafe.Pointer, sz uintptr)

asanregisterglobals function #

func asanregisterglobals(addr unsafe.Pointer, sz uintptr)

asanregisterglobals function #

go:noescape

func asanregisterglobals(addr unsafe.Pointer, n uintptr)

asanunpoison function #

func asanunpoison(addr unsafe.Pointer, sz uintptr)

asanunpoison function #

go:noescape

func asanunpoison(addr unsafe.Pointer, sz uintptr)

asanwrite function #

go:linkname asanwrite go:nosplit

func asanwrite(addr unsafe.Pointer, sz uintptr)

asanwrite function #

func asanwrite(addr unsafe.Pointer, sz uintptr)

asmSigaction function #

asmSigaction is implemented in assembly. go:noescape

func asmSigaction(sig uintptr, new *sigactiont, old *sigactiont) int32

asmcgocall function #

go:noescape

func asmcgocall(fn unsafe.Pointer, arg unsafe.Pointer) int32

asmcgocall_landingpad function #

go:systemstack

func asmcgocall_landingpad()

asmcgocall_no_g function #

go:noescape

func asmcgocall_no_g(fn unsafe.Pointer, arg unsafe.Pointer)

asmcgocall_no_g function #

go:noescape

func asmcgocall_no_g(fn unsafe.Pointer, arg unsafe.Pointer)

asmcgocall_no_g function #

go:noescape

func asmcgocall_no_g(fn unsafe.Pointer, arg unsafe.Pointer)

asmcgocall_no_g function #

go:noescape

func asmcgocall_no_g(fn unsafe.Pointer, arg unsafe.Pointer)

asmcgocall_no_g function #

go:noescape

func asmcgocall_no_g(fn unsafe.Pointer, arg unsafe.Pointer)

asmcgocall_no_g function #

go:noescape

func asmcgocall_no_g(fn unsafe.Pointer, arg unsafe.Pointer)

asmcgocall_no_g function #

go:noescape

func asmcgocall_no_g(fn unsafe.Pointer, arg unsafe.Pointer)

asminit function #

func asminit()

asmstdcall function #

Call a Windows function with stdcall conventions, and switch to os stack during the call.

func asmstdcall(fn unsafe.Pointer)

asmstdcall_trampoline function #

asmstdcall_trampoline calls asmstdcall converting from Go to C calling convention.

func asmstdcall_trampoline(args unsafe.Pointer)

asmsysvicall6 function #

func asmsysvicall6()

assertE2I function #

func assertE2I(inter *interfacetype, t *_type) *itab

assertE2I2 function #

func assertE2I2(inter *interfacetype, t *_type) *itab

assertLockHeld function #

assertLockHeld throws if l is not held by the caller. nosplit to ensure it can be called in as many contexts as possible. go:nosplit

func assertLockHeld(l *mutex)

assertLockHeld function #

go:nosplit

func assertLockHeld(l *mutex)

assertRankHeld function #

go:nosplit

func assertRankHeld(r lockRank)

assertRankHeld function #

assertRankHeld throws if a mutex with rank r is not held by the caller. This is less precise than assertLockHeld, but can be used in places where a pointer to the exact mutex is not available. nosplit to ensure it can be called in as many contexts as possible. go:nosplit

func assertRankHeld(r lockRank)

assertWorldStopped function #

assertWorldStopped throws if the world is not stopped. It does not check which M stopped the world. nosplit to ensure it can be called in as many contexts as possible. go:nosplit

func assertWorldStopped()

assertWorldStopped function #

go:nosplit

func assertWorldStopped()

assertWorldStoppedOrLockHeld function #

go:nosplit

func assertWorldStoppedOrLockHeld(l *mutex)

assertWorldStoppedOrLockHeld function #

assertWorldStoppedOrLockHeld throws if the world is not stopped and the passed lock is not held. nosplit to ensure it can be called in as many contexts as possible. go:nosplit

func assertWorldStoppedOrLockHeld(l *mutex)

assignArg method #

func (p *abiDesc) assignArg(t *_type)

assignReg method #

assignReg attempts to assign a single register for an argument with the given size, at the given offset into the value in the C ABI space. Returns whether the assignment was successful.

func (p *abiDesc) assignReg(size uintptr, offset uintptr) bool

asyncPreempt function #

asyncPreempt saves all user registers and calls asyncPreempt2. When stack scanning encounters an asyncPreempt frame, it scans that frame and its parent frame conservatively. asyncPreempt is implemented in assembly.

func asyncPreempt()

asyncPreempt2 function #

go:nosplit

func asyncPreempt2()

atoi function #

atoi is like atoi64 but for integers that fit into an int.

func atoi(s string) (int, bool)

atoi32 function #

atoi32 is like atoi but for integers that fit into an int32.

func atoi32(s string) (int32, bool)

atoi64 function #

atoi64 parses an int64 from a string s. The bool result reports whether s is a number representable by a value of type int64.

func atoi64(s string) (int64, bool)

atolwhex function #

func atolwhex(p string) int64

atomicAllG function #

atomicAllG returns &allgs[0] and len(allgs) for use with atomicAllGIndex.

func atomicAllG() (**g, uintptr)

atomicAllGIndex function #

atomicAllGIndex returns ptr[i] with the allgptr returned from atomicAllG.

func atomicAllGIndex(ptr **g, i uintptr) *g

atomic_casPointer function #

atomic_casPointer is the implementation of runtime/internal/UnsafePointer.CompareAndSwap (like CompareAndSwapNoWB but with the write barrier). go:nosplit go:linkname atomic_casPointer internal/runtime/atomic.casPointer

func atomic_casPointer(ptr *unsafe.Pointer, old unsafe.Pointer, new unsafe.Pointer) bool

atomic_storePointer function #

atomic_storePointer is the implementation of runtime/internal/UnsafePointer.Store (like StoreNoWB but with the write barrier). go:nosplit go:linkname atomic_storePointer internal/runtime/atomic.storePointer

func atomic_storePointer(ptr *unsafe.Pointer, new unsafe.Pointer)

atomicstorep function #

atomicstorep performs *ptr = new atomically and invokes a write barrier. go:nosplit

func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer)

atomicwb function #

atomicwb performs a write barrier before an atomic pointer write. The caller should guard the call with "if writeBarrier.enabled". atomicwb should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/gopkg - github.com/songzhibin97/gkit Do not remove or change the type signature. See go.dev/issue/67401. go:linkname atomicwb go:nosplit

func atomicwb(ptr *unsafe.Pointer, new unsafe.Pointer)

available method #

nosplit because it's part of writing an event for an M, which must not have any stack growth. go:nosplit

func (buf *traceBuf) available(size int) bool

b method #

go:nosplit

func (l dloggerFake) b(x bool) dloggerFake

b method #

go:nosplit

func (l *dloggerImpl) b(x bool) *dloggerImpl

badDefer function #

badDefer returns a fixed bad defer pointer for poisoning an atomic defer list head.

func badDefer() *_defer

badFuncInfoEntry function #

go:linkname badFuncInfoEntry runtime.funcInfo.entry

func badFuncInfoEntry(funcInfo) uintptr

badPointer function #

badPointer throws bad pointer in heap panic.

func badPointer(s *mspan, p uintptr, refBase uintptr, refOff uintptr)

badSrcFunc function #

go:linkname badSrcFunc runtime.(*inlineUnwinder).srcFunc

func badSrcFunc(*inlineUnwinder, inlineFrame) srcFunc

badSrcFuncName function #

go:linkname badSrcFuncName runtime.srcFunc.name

func badSrcFuncName(srcFunc) string

badTimer function #

badTimer is called if the timer data structures have been corrupted, presumably due to racy use by the program. We panic here rather than panicking due to invalid slice access while holding locks. See issue #25686.

func badTimer()

badcgocallback function #

called from assembly.

func badcgocallback()

badctxt function #

go:nosplit

func badctxt()

badmcall function #

called from assembly.

func badmcall(fn func(*g))

badmcall2 function #

func badmcall2(fn func(*g))

badmorestackg0 function #

go:nosplit go:nowritebarrierrec

func badmorestackg0()

badmorestackgsignal function #

go:nosplit go:nowritebarrierrec

func badmorestackgsignal()

badreflectcall function #

func badreflectcall()

badsignal function #

This runs on a foreign stack, without an m or a g. No stack split. go:nosplit go:norace go:nowritebarrierrec

func badsignal(sig uintptr, c *sigctxt)

badsignal2 function #

This runs on a foreign stack, without an m or a g. No stack split. go:nosplit

func badsignal2()

badsystemstack function #

go:nosplit go:nowritebarrierrec

func badsystemstack()

badunlockosthread function #

func badunlockosthread()

balance method #

balance moves some work that's cached in this gcWork back on the global queue. go:nowritebarrierrec

func (w *gcWork) balance()

base method #

func (s *mspan) base() uintptr

becomeSpinning method #

func (mp *m) becomeSpinning()

beforeIdle function #

beforeIdle gets called by the scheduler if no goroutine is awake. If we are not already handling an event, then we pause for an async event. If an event handler returned, we resume it and it will pause the execution. beforeIdle either returns the specific goroutine to schedule next or indicates with otherReady that some goroutine became ready. TODO(drchase): need to understand if write barriers are really okay in this context. go:yeswritebarrierrec

func beforeIdle(now int64, pollUntil int64) (gp *g, otherReady bool)

beforeIdle function #

func beforeIdle(int64, int64) (*g, bool)

beforeIdle function #

func beforeIdle(int64, int64) (*g, bool)

beforeIdle function #

func beforeIdle(int64, int64) (*g, bool)

begin method #

begin registers a new sweeper. Returns a sweepLocker for acquiring spans for sweeping. Any outstanding sweeper blocks sweep termination. If the sweepLocker is invalid, the caller can be sure that all outstanding sweep work has been drained, so there is nothing left to sweep. Note that there may be sweepers currently running, so this does not indicate that all sweeping has completed. Even if the sweepLocker is invalid, its sweepGen is always valid.

func (a *activeSweep) begin() sweepLocker

begin method #

func (lt *lockTimer) begin()

bgscavenge function #

Background scavenger. The background scavenger maintains the RSS of the application below the line described by the proportional scavenging statistics in the mheap struct.

func bgscavenge(c chan int)

bgsweep function #

func bgsweep(c chan int)

binarySearchTree function #

Build a binary search tree with the n objects in the list x.obj[idx], x.obj[idx+1], ..., x.next.obj[0], ... Returns the root of that tree, and the buf+idx of the nth object after x.obj[idx]. (The first object that was not included in the binary search tree.) If n == 0, returns nil, x.

func binarySearchTree(x *stackObjectBuf, idx int, n int) (root *stackObject, restBuf *stackObjectBuf, restIdx int)

binuptime function #

based on /usr/src/lib/libc/sys/__vdso_gettimeofday.c go:nosplit

func binuptime(abs bool) (bt bintime)

bitp method #

bitp returns a pointer to the byte containing bit n and a mask for selecting that bit from *bytep.

func (b *gcBits) bitp(n uintptr) (bytep *uint8, mask uint8)

block function #

func block()

block64 method #

block64 returns the 64-bit aligned block of bits containing the i'th bit.

func (b *pageBits) block64(i uint) uint64

blockAlignSummaryRange function #

blockAlignSummaryRange aligns indices into the given level to that level's block width (1 << levelBits[level]). It assumes lo is inclusive and hi is exclusive, and so aligns them down and up respectively.

func blockAlignSummaryRange(level int, lo int, hi int) (int, int)

blockProfileInternal function #

blockProfileInternal returns the number of records n in the profile. If there are fewer than size records, copyFn is invoked for each record, and ok returns true.

func blockProfileInternal(size int, copyFn func(profilerecord.BlockProfileRecord)) (n int, ok bool)

blockTimerChan function #

blockTimerChan is called when a channel op has decided to block on c. The caller holds the channel lock for c and possibly other channels. blockTimerChan makes sure that c is in a timer heap, adding it if needed.

func blockTimerChan(c *hchan)

blockUntilEmptyFinalizerQueue function #

blockUntilEmptyFinalizerQueue blocks until either the finalizer queue is emptied (and the finalizers have executed) or the timeout is reached. Returns true if the finalizer queue was emptied. This is used by the runtime and sync tests.

func blockUntilEmptyFinalizerQueue(timeout int64) bool

blockableSig function #

blockableSig reports whether sig may be blocked by the signal mask. We never want to block the signals marked _SigUnblock; these are the synchronous signals that turn into a Go panic. We never want to block the preemption signal if it is being used. In a Go program--not a c-archive/c-shared--we never want to block the signals marked _SigKill or _SigThrow, as otherwise it's possible for all running threads to block them and delay their delivery until we start a new thread. When linked into a C program we let the C code decide on the disposition of those signals.

func blockableSig(sig uint32) bool

blockevent function #

func blockevent(cycles int64, skip int)

blocksampled function #

blocksampled returns true for all events where cycles >= rate. Shorter events have a cycles/rate random chance of returning true.

func blocksampled(cycles int64, rate int64) bool

bool2int function #

bool2int returns 0 if x is false or 1 if x is true.

func bool2int(x bool) int

bootstrapRand function #

bootstrapRand returns a random uint64 from the global random generator.

func bootstrapRand() uint64

bootstrapRandReseed function #

bootstrapRandReseed reseeds the bootstrap random number generator, clearing from memory any trace of previously returned random numbers.

func bootstrapRandReseed()

boring_registerCache function #

go:linkname boring_registerCache crypto/internal/boring/bcache.registerCache

func boring_registerCache(p unsafe.Pointer)

boring_runtime_arg0 function #

go:linkname boring_runtime_arg0 crypto/internal/boring.runtime_arg0

func boring_runtime_arg0() string

bp method #

bp returns the blockRecord associated with the blockProfile bucket b.

func (b *bucket) bp() *blockRecord

breakpoint function #

func breakpoint()

brk_ function #

go:noescape

func brk_(addr unsafe.Pointer) int32

bswapIfBigEndian function #

bswapIfBigEndian swaps the byte order of the uintptr on goarch.BigEndian platforms, and leaves it alone elsewhere.

func bswapIfBigEndian(x uintptr) uintptr

bucketEvacuated function #

func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool

bucketMask function #

bucketMask returns 1<<b - 1, optimized for code generation.

func bucketMask(b uint8) uintptr

bucketShift function #

bucketShift returns 1<<b, optimized for code generation.

func bucketShift(b uint8) uintptr

buildGCMask function #

buildGCMask writes the ptr/nonptr bitmap for t to dst. t must have a pointer.

func buildGCMask(t *_type, dst bitCursor)

buildIndex method #

buildIndex initializes s.root to a binary search tree. It should be called after all addObject calls but before any call of findObject.

func (s *stackScanState) buildIndex()

buildInterfaceSwitchCache function #

buildInterfaceSwitchCache constructs an interface switch cache containing all the entries from oldC plus the new entry (typ,case_,tab).

func buildInterfaceSwitchCache(oldC *abi.InterfaceSwitchCache, typ *_type, case_ int, tab *itab) *abi.InterfaceSwitchCache

buildTypeAssertCache function #

func buildTypeAssertCache(oldC *abi.TypeAssertCache, typ *_type, tab *itab) *abi.TypeAssertCache

bulkBarrierBitmap function #

bulkBarrierBitmap executes write barriers for copying from [src, src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is assumed to start maskOffset bytes into the data covered by the bitmap in bits (which may not be a multiple of 8). This is used by bulkBarrierPreWrite for writes to data and BSS. go:nosplit

func bulkBarrierBitmap(dst uintptr, src uintptr, size uintptr, maskOffset uintptr, bits *uint8)

bulkBarrierPreWrite function #

bulkBarrierPreWrite executes a write barrier for every pointer slot in the memory range [src, src+size), using pointer/scalar information from [dst, dst+size). This executes the write barriers necessary before a memmove. src, dst, and size must be pointer-aligned. The range [dst, dst+size) must lie within a single object. It does not perform the actual writes. As a special case, src == 0 indicates that this is being used for a memclr. bulkBarrierPreWrite will pass 0 for the src of each write barrier. Callers should call bulkBarrierPreWrite immediately before calling memmove(dst, src, size). This function is marked nosplit to avoid being preempted; the GC must not stop the goroutine between the memmove and the execution of the barriers. The caller is also responsible for cgo pointer checks if this may be writing Go pointers into non-Go memory. Pointer data is not maintained for allocations containing no pointers at all; any caller of bulkBarrierPreWrite must first make sure the underlying allocation contains pointers, usually by checking typ.PtrBytes. The typ argument is the type of the space at src and dst (and the element type if src and dst refer to arrays) and it is optional. If typ is nil, the barrier will still behave as expected and typ is used purely as an optimization. However, it must be used with care. If typ is not nil, then src and dst must point to one or more values of type typ. The caller must ensure that the ranges [src, src+size) and [dst, dst+size) refer to one or more whole values of type src and dst (leaving off the pointerless tail of the space is OK). If this precondition is not followed, this function will fail to scan the right pointers. When in doubt, pass nil for typ. That is safe and will always work. Callers must perform cgo checks if goexperiment.CgoCheck2. go:nosplit

func bulkBarrierPreWrite(dst uintptr, src uintptr, size uintptr, typ *abi.Type)

bulkBarrierPreWriteSrcOnly function #

bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but does not execute write barriers for [dst, dst+size). In addition to the requirements of bulkBarrierPreWrite callers need to ensure [dst, dst+size) is zeroed. This is used for special cases where e.g. dst was just created and zeroed with malloc. The type of the space can be provided purely as an optimization. See bulkBarrierPreWrite's comment for more details -- use this optimization with great care. go:nosplit

func bulkBarrierPreWriteSrcOnly(dst uintptr, src uintptr, size uintptr, typ *abi.Type)

byte method #

byte appends v to buf. nosplit because it's part of writing an event for an M, which must not have any stack growth. go:nosplit

func (buf *traceBuf) byte(v byte)

byte method #

go:nosplit

func (l *debugLogWriter) byte(x byte)

bytealg_MakeNoZero function #

go:linkname bytealg_MakeNoZero internal/bytealg.MakeNoZero

func bytealg_MakeNoZero(len int) []byte

bytep method #

bytep returns a pointer to the n'th byte of b.

func (b *gcBits) bytep(n uintptr) *uint8

bytes function #

func bytes(s string) (ret []byte)

bytes method #

go:nosplit

func (l *debugLogWriter) bytes(x []byte)

bytesHasPrefix function #

func bytesHasPrefix(s []byte, prefix []byte) bool

bytesHash function #

func bytesHash(b []byte, seed uintptr) uintptr

c128equal function #

func c128equal(p unsafe.Pointer, q unsafe.Pointer) bool

c128hash function #

func c128hash(p unsafe.Pointer, h uintptr) uintptr

c64equal function #

func c64equal(p unsafe.Pointer, q unsafe.Pointer) bool

c64hash function #

func c64hash(p unsafe.Pointer, h uintptr) uintptr

cacheSpan method #

Allocate a span to use in an mcache.

func (c *mcentral) cacheSpan() *mspan

call1024 function #

func call1024(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

call1048576 function #

func call1048576(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

call1073741824 function #

func call1073741824(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

call128 function #

func call128(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

call131072 function #

func call131072(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

call134217728 function #

func call134217728(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

call16 function #

in asm_*.s not called directly; definitions here supply type information for traceback. These must have the same signature (arg pointer map) as reflectcall.

func call16(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

call16384 function #

func call16384(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

call16777216 function #

func call16777216(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

call2048 function #

func call2048(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

call2097152 function #

func call2097152(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

call256 function #

func call256(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

call262144 function #

func call262144(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

call268435456 function #

func call268435456(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

call32 function #

func call32(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

call32768 function #

func call32768(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

call33554432 function #

func call33554432(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

call4096 function #

func call4096(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

call4194304 function #

func call4194304(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

call512 function #

func call512(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

call524288 function #

func call524288(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

call536870912 function #

func call536870912(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

call64 function #

func call64(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

call65536 function #

func call65536(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

call67108864 function #

func call67108864(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

call8192 function #

func call8192(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

call8388608 function #

func call8388608(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

callCgoMmap function #

callCgoMmap calls the mmap function in the runtime/cgo package using the GCC calling convention. It is implemented in assembly.

func callCgoMmap(addr unsafe.Pointer, n uintptr, prot int32, flags int32, fd int32, off uint32) uintptr

callCgoMunmap function #

callCgoMunmap calls the munmap function in the runtime/cgo package using the GCC calling convention. It is implemented in assembly.

func callCgoMunmap(addr unsafe.Pointer, n uintptr)

callCgoSigaction function #

callCgoSigaction calls the sigaction function in the runtime/cgo package using the GCC calling convention. It is implemented in assembly. go:noescape

func callCgoSigaction(sig uintptr, new *sigactiont, old *sigactiont) int32

callCgoSigaction function #

This is needed for vet. go:noescape

func callCgoSigaction(sig uintptr, new *sigactiont, old *sigactiont) int32

callCgoSymbolizer function #

callCgoSymbolizer calls the cgoSymbolizer function.

func callCgoSymbolizer(arg *cgoSymbolizerArg)

callbackUpdateSystemStack function #

Set or reset the system stack bounds for a callback on sp. Must be nosplit because it is called by needm prior to fully initializing the M. go:nosplit

func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool)

callbackWrap function #

callbackWrap is called by callbackasm to invoke a registered C callback.

func callbackWrap(a *callbackArgs)

callbackasm function #

func callbackasm()

callbackasm1 function #

called from zcallback_windows_*.s to sys_windows_*.s

func callbackasm1()

callbackasmAddr function #

callbackasmAddr returns address of runtime.callbackasm function adjusted by i. On x86 and amd64, runtime.callbackasm is a series of CALL instructions, and we want callback to arrive at correspondent call instruction instead of start of runtime.callbackasm. On ARM, runtime.callbackasm is a series of mov and branch instructions. R12 is loaded with the callback index. Each entry is two instructions, hence 8 bytes.

func callbackasmAddr(i int) uintptr

callers function #

callers should be an internal detail (and is almost identical to Callers), but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/phuslu/log Do not remove or change the type signature. See go.dev/issue/67401. go:linkname callers

func callers(skip int, pcbuf []uintptr) int

canPreemptM function #

canPreemptM reports whether mp is in a state that is safe to preempt. It is nosplit because it has nosplit callers. go:nosplit

func canPreemptM(mp *m) bool

canWriteRecord method #

canWriteRecord reports whether the buffer has room for a single contiguous record with a stack of length nstk.

func (b *profBuf) canWriteRecord(nstk int) bool

canWriteTwoRecords method #

canWriteTwoRecords reports whether the buffer has room for two records with stack lengths nstk1, nstk2, in that order. Each record must be contiguous on its own, but the two records need not be contiguous (one can be at the end of the buffer and the other can wrap around and start at the beginning of the buffer).

func (b *profBuf) canWriteTwoRecords(nstk1 int, nstk2 int) bool

canpanic function #

canpanic returns false if a signal should throw instead of panicking. go:nosplit

func canpanic() bool

cansemacquire function #

func cansemacquire(addr *uint32) bool

captureStack method #

func (prof *mLockProfile) captureStack()

cas method #

func (x *profAtomic) cas(old profIndex, new profIndex) bool

cas method #

go:nosplit

func (gp *guintptr) cas(old guintptr, new guintptr) bool

cas method #

cas atomically compares-and-swaps a headTailIndex value.

func (h *atomicHeadTailIndex) cas(old headTailIndex, new headTailIndex) bool

casGFromPreempted function #

casGFromPreempted attempts to transition gp from _Gpreempted to _Gwaiting. If successful, the caller is responsible for re-scheduling gp.

func casGFromPreempted(gp *g, old uint32, new uint32) bool

casGToPreemptScan function #

casGToPreemptScan transitions gp from _Grunning to _Gscan|_Gpreempted. TODO(austin): This is the only status operation that both changes the status and locks the _Gscan bit. Rethink this.

func casGToPreemptScan(gp *g, old uint32, new uint32)

casGToWaiting function #

casGToWaiting transitions gp from old to _Gwaiting, and sets the wait reason. Use this over casgstatus when possible to ensure that a waitreason is set.

func casGToWaiting(gp *g, old uint32, reason waitReason)

casGToWaitingForSuspendG function #

casGToWaitingForSuspendG transitions gp from old to _Gwaiting, and sets the wait reason. The wait reason must be a valid isWaitingForSuspendG wait reason. Use this over casgstatus when possible to ensure that a waitreason is set.

func casGToWaitingForSuspendG(gp *g, old uint32, reason waitReason)

casfrom_Gscanstatus function #

The Gscanstatuses are acting like locks and this releases them. If it proves to be a performance hit we should be able to make these simple atomic stores but for now we are going to throw if we see an inconsistent state.

func casfrom_Gscanstatus(gp *g, oldval uint32, newval uint32)

casgstatus function #

If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus and casfrom_Gscanstatus instead. casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that put it in the Gscan state is finished. go:nosplit

func casgstatus(gp *g, oldval uint32, newval uint32)

castogscanstatus function #

This will return false if the gp is not in the expected status and the cas fails. This acts like a lock acquire while the casfromgstatus acts like a lock release.

func castogscanstatus(gp *g, oldval uint32, newval uint32) bool

cbsLock function #

func cbsLock()

cbsUnlock function #

func cbsUnlock()

ccr method #

func (c *sigctxt) ccr() uint64

ccr method #

func (c *sigctxt) ccr() uint32

ccr method #

func (c *sigctxt) ccr() uint64

cgoBindM function #

bindm stores the g0 of the current m into a thread-specific value. We allocate a pthread per-thread variable using pthread_key_create, to register a thread-exit-time destructor. We are here setting the thread-specific value of the pthread key, to enable the destructor. So that the pthread_key_destructor would dropm while the C thread is exiting. And the saved g will be used in pthread_key_destructor, since the g stored in the TLS by Go might be cleared in some platforms, before the destructor invoked, so, we restore g by the stored g, before dropm. We store g0 instead of m, to make the assembly code simpler, since we need to restore g0 in runtime.cgocallback. On systems without pthreads, like Windows, bindm shouldn't be used. NOTE: this always runs without a P, so, nowritebarrierrec required. go:nosplit go:nowritebarrierrec

func cgoBindM()

cgoCallers method #

cgoCallers populates pcBuf with the cgo callers of the current frame using the registered cgo unwinder. It returns the number of PCs written to pcBuf. If the current frame is not a cgo frame or if there's no registered cgo unwinder, it returns 0.

func (u *unwinder) cgoCallers(pcBuf []uintptr) int

cgoCheckArg function #

cgoCheckArg is the real work of cgoCheckPointer. The argument p is either a pointer to the value (of type t), or the value itself, depending on indir. The top parameter is whether we are at the top level, where Go pointers are allowed. Go pointers to pinned objects are allowed as long as they don't reference other unpinned pointers.

func cgoCheckArg(t *_type, p unsafe.Pointer, indir bool, top bool, msg string)

cgoCheckBits function #

cgoCheckBits checks the block of memory at src, for up to size bytes, and throws if it finds an unpinned Go pointer. The gcbits mark each pointer value. The src pointer is off bytes into the gcbits. go:nosplit go:nowritebarrier

func cgoCheckBits(src unsafe.Pointer, gcbits *byte, off uintptr, size uintptr)

cgoCheckMemmove function #

cgoCheckMemmove is called when moving a block of memory. It throws if the program is copying a block that contains an unpinned Go pointer into non-Go memory. This is called from generated code when GOEXPERIMENT=cgocheck2 is enabled. go:nosplit go:nowritebarrier

func cgoCheckMemmove(typ *_type, dst unsafe.Pointer, src unsafe.Pointer)

cgoCheckMemmove2 function #

cgoCheckMemmove2 is called when moving a block of memory. dst and src point off bytes into the value to copy. size is the number of bytes to copy. It throws if the program is copying a block that contains an unpinned Go pointer into non-Go memory. go:nosplit go:nowritebarrier

func cgoCheckMemmove2(typ *_type, dst unsafe.Pointer, src unsafe.Pointer, off uintptr, size uintptr)

cgoCheckPointer function #

cgoCheckPointer checks if the argument contains a Go pointer that points to an unpinned Go pointer, and panics if it does.

func cgoCheckPointer(ptr any, arg any)

cgoCheckPtrWrite function #

cgoCheckPtrWrite is called whenever a pointer is stored into memory. It throws if the program is storing an unpinned Go pointer into non-Go memory. This is called from generated code when GOEXPERIMENT=cgocheck2 is enabled. go:nosplit go:nowritebarrier

func cgoCheckPtrWrite(dst *unsafe.Pointer, src unsafe.Pointer)

cgoCheckResult function #

cgoCheckResult is called to check the result parameter of an exported Go function. It panics if the result is or contains any other pointer into unpinned Go memory.

func cgoCheckResult(val any)

cgoCheckSliceCopy function #

cgoCheckSliceCopy is called when copying n elements of a slice. src and dst are pointers to the first element of the slice. typ is the element type of the slice. It throws if the program is copying slice elements that contain unpinned Go pointers into non-Go memory. go:nosplit go:nowritebarrier

func cgoCheckSliceCopy(typ *_type, dst unsafe.Pointer, src unsafe.Pointer, n int)

cgoCheckTypedBlock function #

cgoCheckTypedBlock checks the block of memory at src, for up to size bytes, and throws if it finds an unpinned Go pointer. The type of the memory is typ, and src is off bytes into that type. go:nosplit go:nowritebarrier

func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off uintptr, size uintptr)

cgoCheckUnknownPointer function #

cgoCheckUnknownPointer is called for an arbitrary pointer into Go memory. It checks whether that Go memory contains any other pointer into unpinned Go memory. If it does, we panic. The return values are unused but useful to see in panic tracebacks.

func cgoCheckUnknownPointer(p unsafe.Pointer, msg string) (base uintptr, i uintptr)

cgoCheckUsingType function #

cgoCheckUsingType is like cgoCheckTypedBlock, but is a last ditch fall back to look for pointers in src using the type information. We only use this when looking at a value on the stack when the type uses a GC program, because otherwise it's more efficient to use the GC bits. This is called on the system stack. go:nowritebarrier go:systemstack

func cgoCheckUsingType(typ *_type, src unsafe.Pointer, off uintptr, size uintptr)

cgoContextPCs function #

cgoContextPCs gets the PC values from a cgo traceback.

func cgoContextPCs(ctxt uintptr, buf []uintptr)

cgoInRange function #

cgoInRange reports whether p is between start and end. go:nosplit go:nowritebarrierrec

func cgoInRange(p unsafe.Pointer, start uintptr, end uintptr) bool

cgoIsGoPointer function #

cgoIsGoPointer reports whether the pointer is a Go pointer--a pointer to Go memory. We only care about Go memory that might contain pointers. go:nosplit go:nowritebarrierrec

func cgoIsGoPointer(p unsafe.Pointer) bool

cgoKeepAlive function #

cgoKeepAlive is called by cgo-generated code (using go:linkname to get at an unexported name). This call keeps its argument alive until the call site; cgo emits the call after the last possible use of the argument by C code. cgoKeepAlive is marked in the cgo-generated code as //go:noescape, so unlike cgoUse it does not force the argument to escape to the heap. This is used to implement the #cgo noescape directive.

func cgoKeepAlive(any)

cgoNoCallback function #

func cgoNoCallback(v bool)

cgoSigtramp function #

func cgoSigtramp()

cgoSigtramp function #

func cgoSigtramp()

cgoSigtramp function #

func cgoSigtramp()

cgoUse function #

cgoUse is called by cgo-generated code (using go:linkname to get at an unexported name). The calls serve two purposes: 1) they are opaque to escape analysis, so the argument is considered to escape to the heap. 2) they keep the argument alive until the call site; the call is emitted after the end of the (presumed) use of the argument by C. cgoUse should not actually be called (see cgoAlwaysFalse).

func cgoUse(any)

cgocall function #

Call from Go to C. This must be nosplit because it's used for syscalls on some platforms. Syscalls may have untyped arguments on the stack, so it's not safe to grow or scan the stack. cgocall should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/ebitengine/purego Do not remove or change the type signature. See go.dev/issue/67401. go:linkname cgocall go:nosplit

func cgocall(fn unsafe.Pointer, arg unsafe.Pointer) int32

cgocallback function #

Not all cgocallback frames are actually cgocallback, so not all have these arguments. Mark them uintptr so that the GC does not misinterpret memory when the arguments are not present. cgocallback is not called from Go, only from crosscall2. This in turn calls cgocallbackg, which is where we'll find pointer-declared arguments. When fn is nil (frame is saved g), call dropm instead, this is used when the C thread is exiting.

func cgocallback(fn uintptr, frame uintptr, ctxt uintptr)

cgocallbackg function #

Call from C back to Go. fn must point to an ABIInternal Go entry-point. go:nosplit

func cgocallbackg(fn unsafe.Pointer, frame unsafe.Pointer, ctxt uintptr)

cgocallbackg1 function #

func cgocallbackg1(fn unsafe.Pointer, frame unsafe.Pointer, ctxt uintptr)

cgounimpl function #

called from (incomplete) assembly.

func cgounimpl()

chanbuf function #

chanbuf(c, i) is pointer to the i'th slot in the buffer. chanbuf should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/fjl/memsize Do not remove or change the type signature. See go.dev/issue/67401. go:linkname chanbuf

func chanbuf(c *hchan, i uint) unsafe.Pointer

chancap function #

func chancap(c *hchan) int

changegstatus method #

changegstatus is called when the non-lock status of a g changes. It is never called with a Gscanstatus.

func (sg *synctestGroup) changegstatus(gp *g, oldval uint32, newval uint32)

chanlen function #

func chanlen(c *hchan) int

chanparkcommit function #

func chanparkcommit(gp *g, chanLock unsafe.Pointer) bool

chanrecv function #

chanrecv receives on channel c and writes the received data to ep. ep may be nil, in which case received data is ignored. If block == false and no elements are available, returns (false, false). Otherwise, if c is closed, zeros *ep and returns (true, false). Otherwise, fills in *ep with an element and returns (true, true). A non-nil ep must point to the heap or the caller's stack.

func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected bool, received bool)

chanrecv1 function #

entry points for <- c from compiled code. go:nosplit

func chanrecv1(c *hchan, elem unsafe.Pointer)

chanrecv2 function #

go:nosplit

func chanrecv2(c *hchan, elem unsafe.Pointer) (received bool)

chansend function #

Generic single channel send/recv. If block is not nil, then the protocol will not sleep but return if it could not complete. Sleep can wake up with g.param == nil when a channel involved in the sleep has been closed. It is easiest to loop and re-run the operation; we'll see that it's now closed.

func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool

chansend1 function #

entry point for c <- x from compiled code. go:nosplit

func chansend1(c *hchan, elem unsafe.Pointer)

cheaprand function #

cheaprand is a non-cryptographic-quality 32-bit random generator suitable for calling at very high frequency (such as during scheduling decisions) and at sensitive moments in the runtime (such as during stack unwinding). it is "cheap" in the sense of both expense and quality. cheaprand must not be exported to other packages: the rule is that other packages using runtime-provided randomness must always use rand. cheaprand should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/gopkg Do not remove or change the type signature. See go.dev/issue/67401. go:linkname cheaprand go:nosplit

func cheaprand() uint32

cheaprand64 function #

cheaprand64 is a non-cryptographic-quality 63-bit random generator suitable for calling at very high frequency (such as during sampling decisions). it is "cheap" in the sense of both expense and quality. cheaprand64 must not be exported to other packages: the rule is that other packages using runtime-provided randomness must always use rand. cheaprand64 should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/zhangyunhao116/fastrand Do not remove or change the type signature. See go.dev/issue/67401. go:linkname cheaprand64 go:nosplit

func cheaprand64() int64

cheaprandn function #

cheaprandn is like cheaprand() % n but faster. cheaprandn must not be exported to other packages: the rule is that other packages using runtime-provided randomness must always use randn. cheaprandn should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/phuslu/log Do not remove or change the type signature. See go.dev/issue/67401. go:linkname cheaprandn go:nosplit

func cheaprandn(n uint32) uint32

check function #

func check()

check method #

check runs any timers in ts that are ready. If now is not 0 it is the current time. It returns the passed time, or the current time if now was passed as 0, and the time when the next timer should run or 0 if there is no next timer, and reports whether it ran any timers. If the time when the next timer should run is not 0, it is always larger than the returned time. We pass now in and out to avoid extra calls of nanotime. go:yeswritebarrierrec

func (ts *timers) check(now int64) (rnow int64, pollUntil int64, ran bool)

checkASM function #

checkASM reports whether assembly runtime checks have passed.

func checkASM() bool

checkIdleGCNoP function #

Check for idle-priority GC, without a P on entry. If some GC work, a P, and a worker G are all available, the P and G will be returned. The returned P has not been wired yet.

func checkIdleGCNoP() (*p, *g)

checkLockHeld function #

nosplit to ensure it can be called in as many contexts as possible. go:nosplit

func checkLockHeld(gp *g, l *mutex) bool

checkRanks function #

checkRanks checks if goroutine g, which has most recently acquired a lock with rank 'prevRank', can now acquire a lock with rank 'rank'. go:systemstack

func checkRanks(gp *g, prevRank lockRank, rank lockRank)

checkRunqsNoP function #

Check all Ps for a runnable G to steal. On entry we have no P. If a G is available to steal and a P is available, the P is returned which the caller should acquire and attempt to steal the work to.

func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p

checkS390xCPU function #

func checkS390xCPU()

checkTimeouts function #

func checkTimeouts()

checkTimeouts function #

func checkTimeouts()

checkTimeouts function #

checkTimeouts resumes goroutines that are waiting on a note which has reached its deadline.

func checkTimeouts()

checkTimeouts function #

func checkTimeouts()

checkTimersNoP function #

Check all Ps for a timer expiring sooner than pollUntil. Returns updated pollUntil value.

func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64

checkWorldStopped function #

nosplit to ensure it can be called in as many contexts as possible. go:nosplit

func checkWorldStopped() bool

checkdead function #

Check for deadlock situation. The check is based on number of running M's, if 0 -> deadlock. sched.lock must be held.

func checkdead()

checkempty method #

func (b *workbuf) checkempty()

checkfds function #

func checkfds()

checkfds function #

func checkfds()

checkgoarm function #

func checkgoarm()

checkgoarm function #

func checkgoarm()

checkgoarm function #

func checkgoarm()

checkgoarm function #

func checkgoarm()

checkgoarm function #

func checkgoarm()

checkgoarm function #

func checkgoarm()

checkmcount function #

sched.lock must be held.

func checkmcount()

checknonempty method #

func (b *workbuf) checknonempty()

checkptrAlignment function #

func checkptrAlignment(p unsafe.Pointer, elem *_type, n uintptr)

checkptrArithmetic function #

func checkptrArithmetic(p unsafe.Pointer, originals []unsafe.Pointer)

checkptrBase function #

checkptrBase returns the base address for the allocation containing the address p. Importantly, if p1 and p2 point into the same variable, then checkptrBase(p1) == checkptrBase(p2). However, the converse/inverse is not necessarily true as allocations can have trailing padding, and multiple variables may be packed into a single allocation. checkptrBase should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic Do not remove or change the type signature. See go.dev/issue/67401. go:linkname checkptrBase

func checkptrBase(p unsafe.Pointer) uintptr

checkptrStraddles function #

checkptrStraddles reports whether the first size-bytes of memory addressed by ptr is known to straddle more than one Go allocation.

func checkptrStraddles(ptr unsafe.Pointer, size uintptr) bool

chunkBase function #

chunkBase returns the base address of the palloc chunk at index ci.

func chunkBase(ci chunkIdx) uintptr

chunkIndex function #

chunkIndex returns the global index of the palloc chunk containing the pointer p.

func chunkIndex(p uintptr) chunkIdx

chunkOf method #

chunkOf returns the chunk at the given chunk index. The chunk index must be valid or this method may throw.

func (p *pageAlloc) chunkOf(ci chunkIdx) *pallocData

chunkPageIndex function #

chunkPageIndex computes the index of the page that contains p, relative to the chunk which contains p.

func chunkPageIndex(p uintptr) uint

cleanHead method #

cleanHead cleans up the head of the timer queue. This speeds up programs that create and delete timers; leaving them in the heap slows down heap operations. The caller must have locked ts.

func (ts *timers) cleanHead()

clear method #

clear clears bit i of pageBits.

func (b *pageBits) clear(i uint)

clear method #

clear cancels this timeout event.

func (e *timeoutEvent) clear()

clear method #

clear clears P id's bit.

func (p pMask) clear(id int32)

clear method #

func (s *sweepClass) clear()

clearAll method #

clearAll frees all the bits of b.

func (b *pageBits) clearAll()

clearBlock64 method #

clearBlock64 clears the 64-bit aligned block of bits containing the i'th bit that are set in v.

func (b *pageBits) clearBlock64(i uint, v uint64)

clearIdleTimeout function #

clearIdleTimeout clears our record of the timeout started by beforeIdle.

func clearIdleTimeout()

clearMarked method #

clearMarked clears the marked bit in the markbits, atomically.

func (m markBits) clearMarked()

clearRange method #

clearRange clears bits in the range [i, i+n).

func (b *pageBits) clearRange(i uint, n uint)

clearSignalHandlers function #

go:nosplit go:nowritebarrierrec

func clearSignalHandlers()

clearSignalHandlers function #

clearSignalHandlers clears all signal handlers that are not ignored back to the default. This is called by the child after a fork, so that we can enable the signal mask for the exec without worrying about running a signal handler in the child. go:nosplit go:nowritebarrierrec

func clearSignalHandlers()

clearSignalHandlers function #

go:nosplit go:nowritebarrierrec

func clearSignalHandlers()

clearSignalHandlers function #

go:nosplit go:nowritebarrierrec

func clearSignalHandlers()

clearTimeoutEvent function #

clearTimeoutEvent clears a timeout event scheduled by scheduleTimeoutEvent. go:wasmimport gojs runtime.clearTimeoutEvent

func clearTimeoutEvent(id int32)

clearpools function #

func clearpools()

clobberfree function #

clobberfree sets the memory content at x to bad content, for debugging purposes.

func clobberfree(x unsafe.Pointer, size uintptr)

clock_gettime function #

go:nosplit

func clock_gettime(clockid int32, tp *timespec) int32

clock_gettime_trampoline function #

func clock_gettime_trampoline()

clock_time_get function #

go:wasmimport wasi_snapshot_preview1 clock_time_get go:noescape

func clock_time_get(clock_id clockid, precision timestamp, time *timestamp) errno

clone function #

go:noescape

func clone(flags int32, stk unsafe.Pointer, mp unsafe.Pointer, gp unsafe.Pointer, fn unsafe.Pointer) int32

cloneInto method #

cloneInto makes a deep clone of a's state into b, re-using b's ranges if able.

func (a *addrRanges) cloneInto(b *addrRanges)

close method #

close wakes any goroutine sleeping on the timer and prevents further sleeping on it. Once close is called, the wakeableSleep must no longer be used. It must only be called once no goroutine is sleeping on the timer *and* nothing else will call wake concurrently.

func (s *wakeableSleep) close()

close method #

close signals that there will be no more writes on the buffer. Once all the data has been read from the buffer, reads will return eof=true.

func (b *profBuf) close()

close_trampoline function #

func close_trampoline()

close_trampoline function #

func close_trampoline()

closechan function #

func closechan(c *hchan)

closefd function #

func closefd(fd int32) int32

closefd function #

go:nosplit go:cgo_unsafe_args

func closefd(fd int32) int32

closefd function #

func closefd(fd int32) int32

closefd function #

go:nosplit

func closefd(fd int32) int32

closefd function #

func closefd(fd int32) int32

closefd function #

go:nosplit

func closefd(fd int32) int32

closefd function #

func closefd(fd int32) int32

closefd function #

func closefd(fd int32) int32

closefd function #

go:nosplit go:cgo_unsafe_args

func closefd(fd int32) int32

closeonexec function #

go:nosplit

func closeonexec(fd int32)

closing method #

func (i pollInfo) closing() bool

commit method #

commit recomputes all pacing parameters needed to derive the trigger and the heap goal. Namely, the gcPercent-based heap goal, and the amount of runway we want to give the GC this cycle. This can be called any time. If GC is in the middle of a concurrent phase, it will adjust the pacing of that phase. isSweepDone should be the result of calling isSweepDone(), unless we're testing or we know we're executing during a GC cycle. This depends on gcPercent, gcController.heapMarked, and gcController.heapLive. These must be up to date. Callers must call gcControllerState.revise after calling this function if the GC is enabled. mheap_.lock must be held or the world must be stopped.

func (c *gcControllerState) commit(isSweepDone bool)

compileCallback function #

compileCallback converts a Go function fn into a C function pointer that can be passed to Windows APIs. On 386, if cdecl is true, the returned C function will use the cdecl calling convention; otherwise, it will use stdcall. On amd64, it always uses fastcall. On arm, it always uses the ARM convention. go:linkname compileCallback syscall.compileCallback

func compileCallback(fn eface, cdecl bool) (code uintptr)

complex128div function #

func complex128div(n complex128, m complex128) complex128

compute method #

func (f metricReader) compute(_ *statAggregate, out *metricValue)

compute method #

compute populates the heapStatsAggregate with values from the runtime.

func (a *heapStatsAggregate) compute()

compute method #

compute populates the gcStatsAggregate with values from the runtime.

func (a *gcStatsAggregate) compute()

compute method #

compute populates the cpuStatsAggregate with values from the runtime.

func (a *cpuStatsAggregate) compute()

compute method #

compute populates the sysStatsAggregate with values from the runtime.

func (a *sysStatsAggregate) compute()

compute0 function #

func compute0(_ *statAggregate, out *metricValue)

concatbyte2 function #

func concatbyte2(a0 string, a1 string) []byte

concatbyte3 function #

func concatbyte3(a0 string, a1 string, a2 string) []byte

concatbyte4 function #

func concatbyte4(a0 string, a1 string, a2 string, a3 string) []byte

concatbyte5 function #

func concatbyte5(a0 string, a1 string, a2 string, a3 string, a4 string) []byte

concatbytes function #

concatbytes implements a Go string concatenation x+y+z+... returning a slice of bytes. The operands are passed in the slice a.

func concatbytes(a []string) []byte

concatstring2 function #

func concatstring2(buf *tmpBuf, a0 string, a1 string) string

concatstring3 function #

func concatstring3(buf *tmpBuf, a0 string, a1 string, a2 string) string

concatstring4 function #

func concatstring4(buf *tmpBuf, a0 string, a1 string, a2 string, a3 string) string

concatstring5 function #

func concatstring5(buf *tmpBuf, a0 string, a1 string, a2 string, a3 string, a4 string) string

concatstrings function #

concatstrings implements a Go string concatenation x+y+z+... The operands are passed in the slice a. If buf != nil, the compiler has determined that the result does not escape the calling function, so the string data can be stored in buf if small enough.

func concatstrings(buf *tmpBuf, a []string) string

connect function #

func connect(fd int32, addr unsafe.Pointer, len int32) int32

consume method #

consume acquires the partial event CPU time from any in-flight event. It achieves this by storing the current time as the new event time. Returns the type of the in-flight event, as well as how long it's currently been executing for. Returns limiterEventNone if no event is active.

func (e *limiterEvent) consume(now int64) (typ limiterEventType, duration int64)

contains method #

contains returns true if a covers the address addr.

func (a *addrRanges) contains(addr uintptr) bool

contains method #

contains returns whether or not the range contains a given address.

func (a addrRange) contains(addr uintptr) bool

controllerFailed method #

controllerFailed indicates that the scavenger's scheduling controller failed.

func (s *scavengerState) controllerFailed()

convT function #

convT converts a value of type t, which is pointed to by v, to a pointer that can be used as the second word of an interface value.

func convT(t *_type, v unsafe.Pointer) unsafe.Pointer

convT16 function #

func convT16(val uint16) (x unsafe.Pointer)

convT32 function #

func convT32(val uint32) (x unsafe.Pointer)

convT64 function #

convT64 should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic Do not remove or change the type signature. See go.dev/issue/67401. go:linkname convT64

func convT64(val uint64) (x unsafe.Pointer)

convTnoptr function #

func convTnoptr(t *_type, v unsafe.Pointer) unsafe.Pointer

convTslice function #

convTslice should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic Do not remove or change the type signature. See go.dev/issue/67401. go:linkname convTslice

func convTslice(val []byte) (x unsafe.Pointer)

convTstring function #

convTstring should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic Do not remove or change the type signature. See go.dev/issue/67401. go:linkname convTstring

func convTstring(val string) (x unsafe.Pointer)

copyBlockProfileRecord function #

copyBlockProfileRecord copies the sample values and call stack from src to dst. The call stack is copied as-is. The caller is responsible for handling inline expansion, needed when the call stack was collected with frame pointer unwinding.

func copyBlockProfileRecord(dst *BlockProfileRecord, src profilerecord.BlockProfileRecord)

copyKeys function #

func copyKeys(t *maptype, h *hmap, b *bmap, s *slice, offset uint8)

copyMemProfileRecord function #

func copyMemProfileRecord(dst *MemProfileRecord, src profilerecord.MemProfileRecord)

copyValues function #

func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8)

copysign function #

copysign returns a value with the magnitude of x and the sign of y.

func copysign(x float64, y float64) float64

copystack function #

Copies gp's stack to a new stack of a different size. Caller must have changed gp status to Gcopystack.

func copystack(gp *g, newsize uintptr)

coroexit function #

coroexit is like coroswitch but closes the coro and exits the current goroutine.

func coroexit(c *coro)

corostart function #

corostart is the entry func for a new coroutine. It runs the coroutine user function f passed to corostart and then calls coroexit to remove the extra concurrency.

func corostart()

coroswitch function #

coroswitch switches to the goroutine blocked on c and then blocks the current goroutine on c.

func coroswitch(c *coro)

coroswitch_m function #

coroswitch_m is the implementation of coroswitch that runs on the m stack. Note: Coroutine switches are expected to happen at an order of magnitude (or more) higher frequency than regular goroutine switches, so this path is heavily optimized to remove unnecessary work. The fast path here is three CAS: the one at the top on gp.atomicstatus, the one in the middle to choose the next g, and the one at the bottom on gnext.atomicstatus. It is important not to add more atomic operations or other expensive operations to the fast path.

func coroswitch_m(gp *g)

countAlloc method #

countAlloc returns the number of objects allocated in span s by scanning the mark bitmap.

func (s *mspan) countAlloc() int

countSub function #

countSub subtracts two counts obtained from profIndex.dataCount or profIndex.tagCount, assuming that they are no more than 2^29 apart (guaranteed since they are never more than len(data) or len(tags) apart, respectively). tagCount wraps at 2^30, while dataCount wraps at 2^32. This function works for both.

func countSub(x uint32, y uint32) int

countrunes function #

countrunes returns the number of runes in s.

func countrunes(s string) int

coverage_getCovCounterList function #

go:linkname coverage_getCovCounterList internal/coverage/cfile.getCovCounterList

func coverage_getCovCounterList() []rtcov.CovCounterBlob

cpsr method #

func (c *sigctxt) cpsr() uint32

cpsr method #

func (c *sigctxt) cpsr() uint32

cpsr method #

func (c *sigctxt) cpsr() uint32

cpsr method #

func (c *sigctxt) cpsr() uint32

cpuinit function #

cpuinit sets up CPU feature flags and calls internal/cpu.Initialize. env should be the complete value of the GODEBUG environment variable.

func cpuinit(env string)

cpuset_getaffinity function #

go:noescape

func cpuset_getaffinity(level int, which int, id int64, size int, mask *byte) int32

cputicks function #

go:nosplit

func cputicks() int64

cputicks function #

go:nosplit

func cputicks() int64

cputicks function #

go:nosplit

func cputicks() int64

cputicks function #

go:nosplit

func cputicks() int64

cputicks function #

careful: cputicks is not guaranteed to be monotonic! In particular, we have noticed drift between cpus on certain os/arch combinations. See issue 8976.

func cputicks() int64

cputicks function #

go:nosplit

func cputicks() int64

cputicks function #

go:nosplit

func cputicks() int64

cputicks function #

go:nosplit

func cputicks() int64

cputicks function #

go:nosplit

func cputicks() int64

cputicks function #

go:nosplit

func cputicks() int64

cputicks function #

go:nosplit

func cputicks() int64

cputicks function #

go:nosplit

func cputicks() int64

cputicks function #

go:nosplit

func cputicks() int64

cputicks function #

go:nosplit

func cputicks() int64

cputicks function #

go:nosplit

func cputicks() int64

cputicks function #

go:nosplit

func cputicks() int64

cputicks function #

go:nosplit

func cputicks() int64

crash function #

go:nosplit

func crash()

crash function #

func crash()

crash function #

go:nosplit

func crash()

crash function #

go:nosplit

func crash()

create function #

func create(name *byte, perm int32) int32

create function #

create returns an fd to a write-only file.

func create(name *byte, perm int32) int32

createHighResTimer function #

createHighResTimer calls CreateWaitableTimerEx with CREATE_WAITABLE_TIMER_HIGH_RESOLUTION flag to create high resolution timer. createHighResTimer returns new timer handle or 0, if CreateWaitableTimerEx failed.

func createHighResTimer() uintptr

createOverflow method #

func (h *hmap) createOverflow()

createfing function #

func createfing()

cregs method #

func (c *sigctxt) cregs() *sigcontext

crypto_x509_syscall function #

go:linkname crypto_x509_syscall crypto/x509/internal/macos.syscall go:nosplit

func crypto_x509_syscall(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, f1 float64) (r1 uintptr)

cs method #

func (c *sigctxt) cs() uint64

cs method #

func (c *sigctxt) cs() uint64

cs method #

func (c *sigctxt) cs() uint64

cs method #

func (c *sigctxt) cs() uint64

cs method #

func (c *sigctxt) cs() uint64

cs method #

func (c *sigctxt) cs() uint32

cs method #

func (c *sigctxt) cs() uint32

cs method #

func (c *sigctxt) cs() uint64

cs method #

func (c *sigctxt) cs() uint64

cs method #

func (c *sigctxt) cs() uint32

cs method #

func (c *sigctxt) cs() uint32

cstring function #

func cstring(s string) unsafe.Pointer

ctr method #

func (c *sigctxt) ctr() uint64

ctr method #

func (c *sigctxt) ctr() uint64

ctr method #

func (c *sigctxt) ctr() uint64

ctrlHandler function #

func ctrlHandler(_type uint32) uintptr

ctx method #

func (c *_DISPATCHER_CONTEXT) ctx() *context

ctx method #

func (c *_DISPATCHER_CONTEXT) ctx() *context

ctx method #

func (c *_DISPATCHER_CONTEXT) ctx() *context

ctx method #

func (c *_DISPATCHER_CONTEXT) ctx() *context

currentMemory function #

func currentMemory() int32

dataCount method #

func (x profIndex) dataCount() uint32

debugCallCheck function #

debugCallCheck checks whether it is safe to inject a debugger function call with return PC pc. If not, it returns a string explaining why. go:nosplit

func debugCallCheck(pc uintptr) string

debugCallPanicked function #

func debugCallPanicked(val any)

debugCallV2 function #

func debugCallV2()

debugCallWrap function #

debugCallWrap starts a new goroutine to run a debug call and blocks the calling goroutine. On the goroutine, it prepares to recover panics from the debug call, and then calls the call dispatching function at PC dispatch. This must be deeply nosplit because there are untyped values on the stack from debugCallV2. go:nosplit

func debugCallWrap(dispatch uintptr)

debugCallWrap1 function #

debugCallWrap1 is the continuation of debugCallWrap on the callee goroutine.

func debugCallWrap1()

debugCallWrap2 function #

func debugCallWrap2(dispatch uintptr)

debugPinnerV1 function #

debugPinnerV1 returns a new Pinner that pins itself. This function can be used by debuggers to easily obtain a Pinner that will not be garbage collected (or moved in memory) even if no references to it exist in the target program. This pinner in turn can be used to extend this property to other objects, which debuggers can use to simplify the evaluation of expressions involving multiple call injections.

func debugPinnerV1() *Pinner

debug_modinfo function #

go:linkname debug_modinfo runtime/debug.modinfo

func debug_modinfo() string

decActive method #

decActive decrements the active-count for the group.

func (sg *synctestGroup) decActive()

decHead method #

decHead atomically decrements the head of a headTailIndex.

func (h *atomicHeadTailIndex) decHead() headTailIndex

decPinCounter method #

decPinCounter decreases the counter. If the counter reaches 0, the counter special is deleted and false is returned. Otherwise true is returned.

func (span *mspan) decPinCounter(offset uintptr) bool

decoderune function #

decoderune returns the non-ASCII rune at the start of s[k:] and the index after the rune in s. decoderune assumes that caller has checked that the to be decoded rune is a non-ASCII rune. If the string appears to be incomplete or decoding problems are encountered (runeerror, k + 1) is returned to ensure progress when decoderune is used to iterate over a string.

func decoderune(s string, k int) (r rune, pos int)

deductAssistCredit function #

deductAssistCredit reduces the current G's assist credit by size bytes, and assists the GC if necessary. Caller must be preemptible. Returns the G for which the assist credit was accounted.

func deductAssistCredit(size uintptr) *g

deductSweepCredit function #

deductSweepCredit deducts sweep credit for allocating a span of size spanBytes. This must be performed *before* the span is allocated to ensure the system has enough credit. If necessary, it performs sweeping to prevent going in to debt. If the caller will also sweep pages (e.g., for a large allocation), it can pass a non-zero callerSweepPages to leave that many pages unswept. deductSweepCredit makes a worst-case assumption that all spanBytes bytes of the ultimately allocated span will be available for object allocation. deductSweepCredit is the core of the "proportional sweep" system. It uses statistics gathered by the garbage collector to perform enough sweeping so that all pages are swept during the concurrent sweep phase between GC cycles. mheap_ must NOT be locked.

func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr)

deferconvert function #

deferconvert converts the rangefunc defer list of d0 into an ordinary list following d0. See the doc comment for deferrangefunc for details.

func deferconvert(d0 *_defer)

deferproc function #

Create a new deferred function fn, which has no arguments and results. The compiler turns a defer statement into a call to this.

func deferproc(fn func())

deferprocStack function #

deferprocStack queues a new deferred function with a defer record on the stack. The defer record must have its fn field initialized. All other fields can contain junk. Nosplit because of the uninitialized pointer fields on the stack. go:nosplit

func deferprocStack(d *_defer)

deferprocat function #

deferprocat is like deferproc but adds to the atomic list represented by frame. See the doc comment for deferrangefunc for details.

func deferprocat(fn func(), frame any)

deferrangefunc function #

deferrangefunc is called by functions that are about to execute a range-over-function loop in which the loop body may execute a defer statement. That defer needs to add to the chain for the current function, not the func literal synthesized to represent the loop body. To do that, the original function calls deferrangefunc to obtain an opaque token representing the current frame, and then the loop body uses deferprocat instead of deferproc to add to that frame's defer lists. The token is an 'any' with underlying type *atomic.Pointer[_defer]. It is the atomically-updated head of a linked list of _defer structs representing deferred calls. At the same time, we create a _defer struct on the main g._defer list with d.head set to this head pointer. The g._defer list is now a linked list of deferred calls, but an atomic list hanging off: g._defer => d4 -> d3 -> drangefunc -> d2 -> d1 -> nil | .head | +--> dY -> dX -> nil with each -> indicating a d.link pointer, and where drangefunc has the d.rangefunc = true bit set. Note that the function being ranged over may have added its own defers (d4 and d3), so drangefunc need not be at the top of the list when deferprocat is used. This is why we pass the atomic head explicitly. To keep misbehaving programs from crashing the runtime, deferprocat pushes new defers onto the .head list atomically. The fact that it is a separate list from the main goroutine defer list means that the main goroutine's defers can still be handled non-atomically. In the diagram, dY and dX are meant to be processed when drangefunc would be processed, which is to say the defer order should be d4, d3, dY, dX, d2, d1. To make that happen, when defer processing reaches a d with rangefunc=true, it calls deferconvert to atomically take the extras away from d.head and then adds them to the main list. 
That is, deferconvert changes this list: g._defer => drangefunc -> d2 -> d1 -> nil | .head | +--> dY -> dX -> nil into this list: g._defer => dY -> dX -> d2 -> d1 -> nil It also poisons *drangefunc.head so that any future deferprocat using that head will throw. (The atomic head is ordinary garbage collected memory so that it's not a problem if user code holds onto it beyond the lifetime of drangefunc.) TODO: We could arrange for the compiler to call into the runtime after the loop finishes normally, to do an eager deferconvert, which would catch calling the loop body and having it defer after the loop is done. If we have a more general catch of loop body misuse, though, this might not be worth worrying about in addition. See also ../cmd/compile/internal/rangefunc/rewrite.go.

func deferrangefunc() any

deferreturn function #

deferreturn runs deferred functions for the caller's frame. The compiler inserts a call to this at the end of any function which calls defer.

func deferreturn()

deleteMin method #

deleteMin removes timer 0 from ts. ts must be locked.

func (ts *timers) deleteMin()

dequeue method #

dequeue searches for and finds the first goroutine in semaRoot blocked on addr. If the sudog was being profiled, dequeue returns the time at which it was woken up as now. Otherwise now is 0. If there are additional entries in the wait list, dequeue returns tailtime set to the last entry's acquiretime. Otherwise tailtime is found.acquiretime.

func (root *semaRoot) dequeue(addr *uint32) (found *sudog, now int64, tailtime int64)

dequeue method #

func (q *waitq) dequeue() *sudog

dequeueSudoG method #

func (q *waitq) dequeueSudoG(sgp *sudog)

destroy method #

destroy releases all of the resources associated with pp and transitions it to status _Pdead. sched.lock must be held and the world must be stopped.

func (pp *p) destroy()

dieFromException function #

dieFromException raises an exception that bypasses all exception handlers. This provides the expected exit status for the shell. go:nosplit

func dieFromException(info *exceptionrecord, r *context)

dieFromSignal function #

dieFromSignal kills the program with a signal. This provides the expected exit status for the shell. This is only called with fatal signals expected to kill the process. go:nosplit go:nowritebarrierrec

func dieFromSignal(sig uint32)

diff method #

diff calculates the difference of the event's trigger time and x.

func (e *timeoutEvent) diff(x int64) int64

diff method #

diff returns the amount of bytes in between the two offAddrs.

func (l1 offAddr) diff(l2 offAddr) uintptr

difference method #

difference returns set difference of s from b as a new set.

func (s statDepSet) difference(b statDepSet) statDepSet

discard method #

discard resets b's next pointer, but not its end pointer. This must be nosplit because it's called by wbBufFlush. go:nosplit

func (b *wbBuf) discard()

dispose method #

dispose returns any cached pointers to the global queue. The buffers are being put on the full queue so that the write barriers will not simply reacquire them before the GC can inspect them. This helps reduce the mutator's ability to hide pointers during the concurrent mark phase. go:nowritebarrierrec

func (w *gcWork) dispose()

divRoundUp function #

divRoundUp returns ceil(n / a). go:nosplit

func divRoundUp(n uintptr, a uintptr) uintptr

divideByElemSize method #

divideByElemSize returns n/s.elemsize. n must be within [0, s.npages*_PageSize), or may be exactly s.npages*_PageSize if s.elemsize is from sizeclasses.go. nosplit, because it is called by objIndex, which is nosplit go:nosplit

func (s *mspan) divideByElemSize(n uintptr) uintptr

divlu function #

128/64 -> 64 quotient, 64 remainder. adapted from hacker's delight

func divlu(u1 uint64, u0 uint64, v uint64) (q uint64, r uint64)

dlog function #

dlog returns a debug logger. The caller can use methods on the returned logger to add values, which will be space-separated in the final output, much like println. The caller must call end() to finish the message. dlog can be used from highly-constrained corners of the runtime: it is safe to use in the signal handler, from within the write barrier, from within the stack implementation, and in places that must be recursively nosplit. This will be compiled away if built without the debuglog build tag. However, argument construction may not be. If any of the arguments are not literals or trivial expressions, consider protecting the call with "if dlogEnabled". go:nosplit go:nowritebarrierrec

func dlog() dlogger

dlog1 function #

func dlog1() dloggerFake

dlog1 function #

func dlog1() *dloggerImpl

dlogFake function #

go:nosplit go:nowritebarrierrec

func dlogFake() dloggerFake

dlogImpl function #

go:nosplit go:nowritebarrierrec

func dlogImpl() *dloggerImpl

doInit function #

func doInit(ts []*initTask)

doInit1 function #

func doInit1(t *initTask)

doMmap function #

go:nosplit go:cgo_unsafe_args

func doMmap(addr uintptr, n uintptr, prot uintptr, flags uintptr, fd uintptr, off uintptr) (uintptr, uintptr)

doRecordGoroutineProfile function #

doRecordGoroutineProfile writes gp1's call stack and labels to an in-progress goroutine profile. Preemption is disabled. This may be called via tryRecordGoroutineProfile in two ways: by the goroutine that is coordinating the goroutine profile (running on its own stack), or from the scheduler in preparation to execute gp1 (running on the system stack).

func doRecordGoroutineProfile(gp1 *g, pcbuf []uintptr)

doSigPreempt function #

doSigPreempt handles a preemption signal on gp.

func doSigPreempt(gp *g, ctxt *sigctxt)

doasanread function #

go:noescape

func doasanread(addr unsafe.Pointer, sz uintptr, sp uintptr, pc uintptr)

doasanwrite function #

go:noescape

func doasanwrite(addr unsafe.Pointer, sz uintptr, sp uintptr, pc uintptr)

dodiv function #

go:nosplit

func dodiv(n uint64, d uint64) (q uint64, r uint64)

dofiles function #

dofiles reads the directory opened with file descriptor fd, applying function f to each filename in it. go:nosplit

func dofiles(dirfd int32, f func([]byte))

dolockOSThread function #

dolockOSThread is called by LockOSThread and lockOSThread below after they modify m.locked. Do not allow preemption during this call, or else the m might be different in this function than in the caller. go:nosplit

func dolockOSThread()

domsanread function #

go:noescape

func domsanread(addr unsafe.Pointer, sz uintptr)

done method #

func (enum *randomEnum) done() bool

dopanic_m function #

gp is the crashing g running on this M, but may be a user G, while getg() is always g0.

func dopanic_m(gp *g, pc uintptr, sp uintptr) bool

doubleCheckHeapPointers function #

func doubleCheckHeapPointers(x uintptr, dataSize uintptr, typ *_type, header **_type, span *mspan)

doubleCheckHeapPointersInterior function #

func doubleCheckHeapPointersInterior(x uintptr, interior uintptr, size uintptr, dataSize uintptr, typ *_type, header **_type, span *mspan)

doubleCheckHeapType function #

func doubleCheckHeapType(x uintptr, dataSize uintptr, gctyp *_type, header **_type, span *mspan)

doubleCheckTypePointersOfType function #

go:nosplit

func doubleCheckTypePointersOfType(s *mspan, typ *_type, addr uintptr, size uintptr)

dounlockOSThread function #

dounlockOSThread is called by UnlockOSThread and unlockOSThread below after they update m->locked. Do not allow preemption during this call, or else the m might be different in this function than in the caller. go:nosplit

func dounlockOSThread()

drop method #

drop frees all previously allocated memory and resets the allocator. drop is not safe to call concurrently with other calls to drop or with calls to alloc. The caller must ensure that it is not possible for anything else to be using the same structure.

func (a *traceRegionAlloc) drop()

dropg function #

dropg removes the association between m and the current goroutine m->curg (gp for short). Typically a caller sets gp's status away from Grunning and then immediately calls dropg to finish the job. The caller is also responsible for arranging that gp will be restarted using ready at an appropriate time. After calling dropg and arranging for gp to be readied later, the caller can do other work but eventually should call schedule to restart the scheduling of goroutines on this m.

func dropg()

dropm function #

dropm puts the current m back onto the extra list. 1. On systems without pthreads, like Windows dropm is called when a cgo callback has called needm but is now done with the callback and returning back into the non-Go thread. The main expense here is the call to signalstack to release the m's signal stack, and then the call to needm on the next callback from this thread. It is tempting to try to save the m for next time, which would eliminate both these costs, but there might not be a next time: the current thread (which Go does not control) might exit. If we saved the m for that thread, there would be an m leak each time such a thread exited. Instead, we acquire and release an m on each call. These should typically not be scheduling operations, just a few atomics, so the cost should be small. 2. On systems with pthreads dropm is called while a non-Go thread is exiting. We allocate a pthread per-thread variable using pthread_key_create, to register a thread-exit-time destructor. And store the g into a thread-specific value associated with the pthread key, when first return back to C. So that the destructor would invoke dropm while the non-Go thread is exiting. This is much faster since it avoids expensive signal-related syscalls. This always runs without a P, so //go:nowritebarrierrec is required. This may run with a different stack than was recorded in g0 (there is no call to callbackUpdateSystemStack prior to dropm), so this must be //go:nosplit to avoid the stack bounds check. go:nowritebarrierrec go:nosplit

func dropm()

duffcopy function #

func duffcopy()

duffzero function #

func duffzero()

dump method #

dump writes all previously cached types to trace buffers and releases all memory and resets state. It must only be called once the caller can guarantee that there are no more writers to the table.

func (t *traceTypeTable) dump(gen uintptr)

dump method #

dump writes all previously cached stacks to trace buffers, releases all memory and resets state. It must only be called once the caller can guarantee that there are no more writers to the table.

func (t *traceStackTable) dump(gen uintptr)

dumpGCProg function #

func dumpGCProg(p *byte)

dumpStacksRec function #

func dumpStacksRec(node *traceMapNode, w traceWriter, stackBuf []uintptr) traceWriter

dumpTypePointers function #

func dumpTypePointers(tp typePointers)

dumpTypesRec function #

func dumpTypesRec(node *traceMapNode, w traceWriter) traceWriter

dumpbool function #

func dumpbool(b bool)

dumpbv function #

dump kinds & offsets of interesting fields in bv.

func dumpbv(cbv *bitvector, offset uintptr)

dumpfields function #

dumpint() the kind & offset of each field in an object.

func dumpfields(bv bitvector)

dumpfinalizer function #

func dumpfinalizer(obj unsafe.Pointer, fn *funcval, fint *_type, ot *ptrtype)

dumpframe function #

func dumpframe(s *stkframe, child *childInfo)

dumpgoroutine function #

func dumpgoroutine(gp *g)

dumpgs function #

func dumpgs()

dumpgstatus function #

func dumpgstatus(gp *g)

dumpint function #

dump a uint64 in a varint format parseable by encoding/binary.

func dumpint(v uint64)

dumpitabs function #

func dumpitabs()

dumpmemprof function #

func dumpmemprof()

dumpmemprof_callback function #

func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size uintptr, allocs uintptr, frees uintptr)

dumpmemrange function #

dump varint uint64 length followed by memory contents.

func dumpmemrange(data unsafe.Pointer, len uintptr)

dumpmemstats function #

go:systemstack

func dumpmemstats(m *MemStats)

dumpms function #

func dumpms()

dumpobj function #

dump an object.

func dumpobj(obj unsafe.Pointer, size uintptr, bv bitvector)

dumpobjs function #

func dumpobjs()

dumpotherroot function #

func dumpotherroot(description string, to unsafe.Pointer)

dumpparams function #

func dumpparams()

dumpregs function #

func dumpregs(r *context)

dumpregs function #

func dumpregs(r *context)

dumpregs function #

func dumpregs(c *sigctxt)

dumpregs function #

func dumpregs(u *ureg)

dumpregs function #

func dumpregs(r *context)

dumpregs function #

func dumpregs(c *sigctxt)

dumpregs function #

func dumpregs(u *ureg)

dumpregs function #

func dumpregs(c *sigctxt)

dumpregs function #

func dumpregs(c *sigctxt)

dumpregs function #

func dumpregs(c *sigctxt)

dumpregs function #

func dumpregs(c *sigctxt)

dumpregs function #

func dumpregs(u *ureg)

dumpregs function #

func dumpregs(c *sigctxt)

dumpregs function #

func dumpregs(r *context)

dumpregs function #

func dumpregs(c *sigctxt)

dumpregs function #

func dumpregs(c *sigctxt)

dumpregs function #

func dumpregs(c *sigctxt)

dumproots function #

func dumproots()

dumpslice function #

func dumpslice(b []byte)

dumpstr function #

func dumpstr(s string)

dumptype function #

dump information for a type.

func dumptype(t *_type)

duration method #

duration computes the difference between now and the start time stored in the stamp. Returns 0 if the difference is negative, which may happen if now is stale or if the before and after timestamps cross a 2^(64-limiterEventBits) boundary.

func (s limiterEventStamp) duration(now int64) int64

dwrite function #

func dwrite(data unsafe.Pointer, len uintptr)

dwritebyte function #

func dwritebyte(b byte)

eax method #

func (c *sigctxt) eax() uint32

eax method #

func (c *sigctxt) eax() uint32

eax method #

func (c *sigctxt) eax() uint32

eax method #

func (c *sigctxt) eax() uint32

ebp method #

func (c *sigctxt) ebp() uint32

ebp method #

func (c *sigctxt) ebp() uint32

ebp method #

func (c *sigctxt) ebp() uint32

ebp method #

func (c *sigctxt) ebp() uint32

ebx method #

func (c *sigctxt) ebx() uint32

ebx method #

func (c *sigctxt) ebx() uint32

ebx method #

func (c *sigctxt) ebx() uint32

ebx method #

func (c *sigctxt) ebx() uint32

ecx method #

func (c *sigctxt) ecx() uint32

ecx method #

func (c *sigctxt) ecx() uint32

ecx method #

func (c *sigctxt) ecx() uint32

ecx method #

func (c *sigctxt) ecx() uint32

edi method #

func (c *sigctxt) edi() uint32

edi method #

func (c *sigctxt) edi() uint32

edi method #

func (c *sigctxt) edi() uint32

edi method #

func (c *sigctxt) edi() uint32

edx method #

func (c *sigctxt) edx() uint32

edx method #

func (c *sigctxt) edx() uint32

edx method #

func (c *sigctxt) edx() uint32

edx method #

func (c *sigctxt) edx() uint32

efaceHash function #

func efaceHash(i any, seed uintptr) uintptr

efaceOf function #

func efaceOf(ep *any) *eface

efaceeq function #

func efaceeq(t *_type, x unsafe.Pointer, y unsafe.Pointer) bool

eflags method #

func (c *sigctxt) eflags() uint32

eflags method #

func (c *sigctxt) eflags() uint32

eflags method #

func (c *sigctxt) eflags() uint32

eflags method #

func (c *sigctxt) eflags() uint32

eip method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) eip() uint32

eip method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) eip() uint32

eip method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) eip() uint32

eip method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) eip() uint32

elideWrapperCalling function #

elideWrapperCalling reports whether a wrapper function that called function id should be elided from stack traces.

func elideWrapperCalling(id abi.FuncID) bool

emit method #

emit emits a string and creates an ID for it, but doesn't add it to the table. Returns the ID.

func (t *traceStringTable) emit(gen uintptr, s string) uint64

emitUnblockStatus method #

emitUnblockStatus emits a GoStatus GoWaiting event for a goroutine about to be unblocked to the trace writer.

func (tl traceLocker) emitUnblockStatus(gp *g, gen uintptr)

empty method #

empty reports whether l is empty.

func (l *gList) empty() bool

empty method #

empty reports whether the page cache has no free pages.

func (c *pageCache) empty() bool

empty method #

empty returns true if there are no dependencies in the set.

func (s *statDepSet) empty() bool

empty method #

func (head *lfstack) empty() bool

empty function #

empty reports whether a read from c would block (that is, the channel is empty). It is atomically correct and sequentially consistent at the moment it returns, but since the channel is unlocked, the channel may become non-empty immediately afterward.

func empty(c *hchan) bool

empty method #

empty reports whether w has no mark work available. go:nowritebarrierrec

func (w *gcWork) empty() bool

empty method #

empty reports whether b contains no pointers.

func (b *wbBuf) empty() bool

empty method #

empty reports whether q is empty.

func (q *gQueue) empty() bool

empty method #

func (q *traceBufQueue) empty() bool

emptyfunc function #

func emptyfunc()

emptyfunc function #

func emptyfunc()

emptyfunc function #

func emptyfunc()

enableChunkHugePages method #

enableChunkHugePages enables huge pages for the chunk bitmap mappings (disabled by default). This function is idempotent. A note on latency: for sufficiently small heaps (<10s of GiB) this function will take constant time, but may take time proportional to the size of the mapped heap beyond that. The heap lock must not be held over this operation, since it will briefly acquire the heap lock. Must be called on the system stack because it acquires the heap lock. go:systemstack

func (p *pageAlloc) enableChunkHugePages()

enableMetadataHugePages method #

enableMetadataHugePages enables huge pages for various sources of heap metadata. A note on latency: for sufficiently small heaps (<10s of GiB) this function will take constant time, but may take time proportional to the size of the mapped heap beyond that. This function is idempotent. The heap lock must not be held over this operation, since it will briefly acquire the heap lock. Must be called on the system stack because it acquires the heap lock. go:systemstack

func (h *mheap) enableMetadataHugePages()

enableWER function #

enableWER is called by setTraceback("wer"). Windows Error Reporting (WER) is only supported on Windows.

func enableWER()

enableWER function #

enableWER re-enables Windows error reporting without fault reporting UI.

func enableWER()

encoderune function #

encoderune writes into p (which must be large enough) the UTF-8 encoding of the rune. It returns the number of bytes written.

func encoderune(p []byte, r rune) int

end method #

func (lt *lockTimer) end()

end method #

end writes the buffer back into the m. nosplit because it's part of writing an event for an M, which must not have any stack growth. go:nosplit

func (w traceWriter) end()

end method #

end extracts the end value from a packed sum.

func (p pallocSum) end() uint

end method #

go:nosplit

func (l *dloggerImpl) end()

end method #

go:nosplit

func (l dloggerFake) end()

end method #

end deregisters a sweeper. Must be called once for each time begin is called if the sweepLocker is valid.

func (a *activeSweep) end(sl sweepLocker)

endCheckmarks function #

endCheckmarks ends the checkmarks phase.

func endCheckmarks()

endCycle method #

endCycle computes the consMark estimate for the next cycle. userForced indicates whether the current GC cycle was forced by the application.

func (c *gcControllerState) endCycle(now int64, procs int, userForced bool)

enlistWorker method #

enlistWorker encourages another dedicated mark worker to start on another P if there are spare worker slots. It is used by putfull when more work is made available. go:nowritebarrier

func (c *gcControllerState) enlistWorker()

enqueue method #

func (q *waitq) enqueue(sgp *sudog)

ensure method #

ensure makes sure that at least maxSize bytes are available to write. Returns whether the buffer was flushed. nosplit because it's part of writing an event for an M, which must not have any stack growth. go:nosplit

func (w traceWriter) ensure(maxSize int) (traceWriter, bool)

ensure method #

go:nosplit

func (l *debugLogWriter) ensure(n uint64)

ensure method #

ensure populates statistics aggregates determined by deps if they haven't yet been populated.

func (a *statAggregate) ensure(deps *statDepSet)

ensureSigM function #

ensureSigM starts one global, sleeping thread to make sure at least one thread is available to catch signals enabled for os/signal.

func ensureSigM()

ensureSwept method #

Returns only when span s has been swept. go:nowritebarrier

func (s *mspan) ensureSwept()

entersyscall function #

Standard syscall entry used by the go syscall library and normal cgo calls. This is exported via linkname to assembly in the syscall package and x/sys. Other packages should not be accessing entersyscall directly, but widely used packages access it using linkname. Notable members of the hall of shame include: - gvisor.dev/gvisor Do not remove or change the type signature. See go.dev/issue/67401. go:nosplit go:linkname entersyscall

func entersyscall()

entersyscall_gcwait function #

func entersyscall_gcwait()

entersyscall_sysmon function #

func entersyscall_sysmon()

entersyscallblock function #

entersyscallblock should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - gvisor.dev/gvisor Do not remove or change the type signature. See go.dev/issue/67401. go:linkname entersyscallblock go:nosplit

func entersyscallblock()

entersyscallblock_handoff function #

func entersyscallblock_handoff()

entry method #

entry returns the entry PC for f. entry should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/phuslu/log Do not remove or change the type signature. See go.dev/issue/67401.

func (f funcInfo) entry() uintptr

envKeyEqual function #

envKeyEqual reports whether a == b, with ASCII-only case insensitivity on Windows. The two strings must have the same length.

func envKeyEqual(a string, b string) bool

environ function #

func environ() []string

environ_get function #

go:wasmimport wasi_snapshot_preview1 environ_get go:noescape

func environ_get(environ *uintptr32, environBuf *byte) errno

environ_sizes_get function #

go:wasmimport wasi_snapshot_preview1 environ_sizes_get go:noescape

func environ_sizes_get(environCount *size, environBufLen *size) errno

eqslice function #

func eqslice(x []uintptr, y []uintptr) bool

equal method #

equal returns true if the two offAddr values are equal.

func (l1 offAddr) equal(l2 offAddr) bool

errno function #

func errno() int32

error method #

func (c *sigctxt) error() uint32

error method #

func (c *sigctxt) error() uint32

error method #

func (c *sigctxt) error() uint32

error method #

func (c *sigctxt) error() uint32

error method #

func (c *sigctxt) error() uint64

errstr function #

func errstr() string

esi method #

func (c *sigctxt) esi() uint32

esi method #

func (c *sigctxt) esi() uint32

esi method #

func (c *sigctxt) esi() uint32

esi method #

func (c *sigctxt) esi() uint32

esp method #

func (c *sigctxt) esp() uint32

esp method #

func (c *sigctxt) esp() uint32

esp method #

func (c *sigctxt) esp() uint32

esp method #

func (c *sigctxt) esp() uint32

evacuate function #

func evacuate(t *maptype, h *hmap, oldbucket uintptr)

evacuate_fast32 function #

func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr)

evacuate_fast64 function #

func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr)

evacuate_faststr function #

func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr)

evacuated function #

func evacuated(b *bmap) bool

event method #

event writes out a trace event.

func (e traceEventWriter) event(ev traceEv, args ...traceArg)

event method #

event writes out the bytes of an event into the event stream. nosplit because it's part of writing an event for an M, which must not have any stack growth. go:nosplit

func (w traceWriter) event(ev traceEv, args ...traceArg) traceWriter

eventErr method #

func (i pollInfo) eventErr() bool

eventWriter method #

eventWriter creates a new traceEventWriter. It is the main entrypoint for writing trace events. Before creating the event writer, this method will emit a status for the current goroutine or proc if it exists, and if it hasn't had its status emitted yet. goStatus and procStatus indicate what the status of goroutine or P should be immediately *before* the events that are about to be written using the eventWriter (if they exist). No status will be written if there's no active goroutine or P. Callers can elect to pass a constant value here if the status is clear (e.g. a goroutine must have been Runnable before a GoStart). Otherwise, callers can query the status of either the goroutine or P and pass the appropriate status. In this case, the default status should be traceGoBad or traceProcBad to help identify bugs sooner.

func (tl traceLocker) eventWriter(goStatus traceGoStatus, procStatus traceProcStatus) traceEventWriter

eventtype method #

func (u *subscriptionUnion) eventtype() *eventtype

exceptionhandler function #

Called by sigtramp from Windows VEH handler. Return value signals whether the exception has been handled (EXCEPTION_CONTINUE_EXECUTION) or should be made available to other handlers in the chain (EXCEPTION_CONTINUE_SEARCH). This is nosplit to avoid growing the stack until we've checked for _EXCEPTION_BREAKPOINT, which is raised by abort() if we overflow the g0 stack. go:nosplit

func exceptionhandler(info *exceptionrecord, r *context, gp *g) int32

exceptiontramp function #

in sys_windows_386.s, sys_windows_amd64.s, sys_windows_arm.s, and sys_windows_arm64.s

func exceptiontramp()

execute function #

Schedules gp to run on the current M. If inheritTime is true, gp inherits the remaining time in the current time slice. Otherwise, it starts a new time slice. Never returns. Write barriers are allowed because this is called immediately after acquiring a P in several places. go:yeswritebarrierrec

func execute(gp *g, inheritTime bool)

exit function #

go:wasmimport wasi_snapshot_preview1 proc_exit

func exit(code int32)

exit function #

func exit(code int32)

exit function #

go:nosplit

func exit(e int32)

exit function #

func exit(code int32)

exit function #

go:nosplit

func exit(code int32)

exit function #

This is exported via linkname to assembly in runtime/cgo. go:nosplit go:cgo_unsafe_args go:linkname exit

func exit(code int32)

exit function #

go:nosplit

func exit(code int32)

exit function #

This is exported via linkname to assembly in runtime/cgo. go:linkname exit go:nosplit go:cgo_unsafe_args

func exit(code int32)

exit function #

func exit(code int32)

exit function #

go:nosplit

func exit(r int32)

exit1 function #

func exit1(code int32)

exitThread function #

Not used on OpenBSD, but must be defined.

func exitThread(wait *atomic.Uint32)

exitThread function #

exitThread terminates the current thread, writing *wait = freeMStack when the stack is safe to reclaim. go:noescape

func exitThread(wait *atomic.Uint32)

exitThread function #

func exitThread(wait *atomic.Uint32)

exitThread function #

exitThread terminates the current thread, writing *wait = freeMStack when the stack is safe to reclaim. go:noescape

func exitThread(wait *atomic.Uint32)

exitThread function #

Previously declared as func exitThread(wait *uint32). FIXME: wasm doesn't have atomics yet.

func exitThread(wait *atomic.Uint32)

exitThread function #

func exitThread(wait *atomic.Uint32)

exitThread function #

Not used on Darwin, but must be defined.

func exitThread(wait *atomic.Uint32)

exitThread function #

func exitThread(wait *atomic.Uint32)

exitThread function #

func exitThread(wait *atomic.Uint32)

exit_trampoline function #

func exit_trampoline()

exit_trampoline function #

func exit_trampoline()

exits function #

go:noescape

func exits(msg *byte)

exitsyscall function #

The goroutine g exited its system call. Arrange for it to run on a cpu again. This is called only from the go syscall library, not from the low-level system calls used by the runtime. Write barriers are not allowed because our P may have been stolen. This is exported via linkname to assembly in the syscall package. exitsyscall should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - gvisor.dev/gvisor Do not remove or change the type signature. See go.dev/issue/67401. go:nosplit go:nowritebarrierrec go:linkname exitsyscall

func exitsyscall()

exitsyscall0 function #

exitsyscall slow path on g0. Failed to acquire P, enqueue gp as runnable. Called via mcall, so gp is the calling g from this M. go:nowritebarrierrec

func exitsyscall0(gp *g)

exitsyscallfast function #

go:nosplit

func exitsyscallfast(oldp *p) bool

exitsyscallfast_pidle function #

func exitsyscallfast_pidle() bool

exitsyscallfast_reacquired function #

exitsyscallfast_reacquired is the exitsyscall path on which this G has successfully reacquired the P it was running on before the syscall. go:nosplit

func exitsyscallfast_reacquired(trace traceLocker)

expWriter method #

expWriter returns a traceWriter that writes into the current M's stream for the given experiment.

func (tl traceLocker) expWriter(exp traceExperiment) traceWriter

expandCgoFrames function #

expandCgoFrames expands frame information for pc, known to be a non-Go function, using the cgoSymbolizer hook. expandCgoFrames returns nil if pc could not be expanded.

func expandCgoFrames(pc uintptr) []Frame

expandFrames function #

func expandFrames(p []BlockProfileRecord)

expiredReadDeadline method #

func (i pollInfo) expiredReadDeadline() bool

expiredWriteDeadline method #

func (i pollInfo) expiredWriteDeadline() bool

f32equal function #

func f32equal(p unsafe.Pointer, q unsafe.Pointer) bool

f32hash function #

func f32hash(p unsafe.Pointer, h uintptr) uintptr

f32to64 function #

func f32to64(f uint32) uint64

f32toint32 function #

func f32toint32(x uint32) int32

f32toint64 function #

func f32toint64(x uint32) int64

f32touint64 function #

func f32touint64(x uint32) uint64

f64equal function #

func f64equal(p unsafe.Pointer, q unsafe.Pointer) bool

f64hash function #

func f64hash(p unsafe.Pointer, h uintptr) uintptr

f64to32 function #

func f64to32(f uint64) uint32

f64toint function #

func f64toint(f uint64) (val int64, ok bool)

f64toint32 function #

func f64toint32(x uint64) int32

f64toint64 function #

func f64toint64(x uint64) int64

f64touint64 function #

func f64touint64(x uint64) uint64

fadd32 function #

func fadd32(x uint32, y uint32) uint32

fadd64 function #

func fadd64(f uint64, g uint64) uint64

fallback_nanotime function #

func fallback_nanotime() int64

fallback_walltime function #

func fallback_walltime() (sec int64, nsec int32)

fandbits function #

func fandbits(x F, y F) F

fastForward method #

fastForward moves the iterator forward by n bytes. n must be a multiple of goarch.PtrSize. limit must be the same limit passed to next for this iterator. nosplit because it is used during write barriers and must not be preempted. go:nosplit

func (tp typePointers) fastForward(n uintptr, limit uintptr) typePointers

fastexprand function #

fastexprand returns a random number from an exponential distribution with the specified mean.

func fastexprand(mean int) int32

fastlog2 function #

fastlog2 implements a fast approximation to the base 2 log of a float64. This is used to compute a geometric distribution for heap sampling, without introducing dependencies into package math. This uses a very rough approximation using the float64 exponent and the first 25 bits of the mantissa. The top 5 bits of the mantissa are used to load limits from a table of constants and the rest are used to scale linearly between them.

func fastlog2(x float64) float64

fatal function #

fatal triggers a fatal error that dumps a stack trace and exits. fatal is equivalent to throw, but is used when user code is expected to be at fault for the failure, such as racing map writes. fatal does not include runtime frames, system goroutines, or frame metadata (fp, sp, pc) in the stack trace unless GOTRACEBACK=system or higher. go:nosplit

func fatal(s string)

fatalpanic function #

fatalpanic implements an unrecoverable panic. It is like fatalthrow, except that if msgs != nil, fatalpanic also prints panic messages and decrements runningPanicDefers once main is blocked from exiting. go:nosplit

func fatalpanic(msgs *_panic)

fatalsignal function #

func fatalsignal(sig uint32, c *sigctxt, gp *g, mp *m) *g

fatalthrow function #

fatalthrow implements an unrecoverable runtime throw. It freezes the system, prints stack traces starting from its caller, and terminates the process. go:nosplit

func fatalthrow(t throwType)

fault method #

func (c *sigctxt) fault() uintptr

fault method #

func (c *sigctxt) fault() uintptr

fault method #

func (c *sigctxt) fault() uintptr

fault method #

func (c *sigctxt) fault() uintptr

fault method #

func (c *sigctxt) fault() uintptr

fault method #

func (c *sigctxt) fault() uintptr

fault method #

func (c *sigctxt) fault() uintptr

fault method #

func (c *sigctxt) fault() uintptr

fault method #

func (c *sigctxt) fault() uintptr

fault method #

func (c *sigctxt) fault() uintptr

fault method #

func (c *sigctxt) fault() uintptr

fault method #

func (c *sigctxt) fault() uintptr

fault method #

func (c *sigctxt) fault() uintptr

fault method #

func (c *sigctxt) fault() uintptr

fault method #

func (c *sigctxt) fault() uintptr

fault method #

func (c *sigctxt) fault() uintptr

fault method #

func (c *sigctxt) fault() uintptr

fault method #

func (c *sigctxt) fault() uintptr

fault method #

func (c *sigctxt) fault() uintptr

fcmp64 function #

func fcmp64(f uint64, g uint64) (cmp int32, isnan bool)

fcntl function #

go:nosplit go:cgo_unsafe_args

func fcntl(fd int32, cmd int32, arg int32) (ret int32, errno int32)

fcntl function #

go:nosplit

func fcntl(fd int32, cmd int32, arg int32) (ret int32, errno int32)

fcntl function #

go:nosplit

func fcntl(fd int32, cmd int32, arg int32) (int32, int32)

fcntl function #

go:nosplit go:cgo_unsafe_args

func fcntl(fd int32, cmd int32, arg int32) (ret int32, errno int32)

fcntl function #

func fcntl(fd int32, cmd int32, arg int32) (ret int32, errno int32)

fcntl function #

go:nosplit

func fcntl(fd int32, cmd int32, arg int32) (ret int32, errno int32)

fcntl function #

func fcntl(fd int32, cmd int32, arg int32) (ret int32, errno int32)

fcntl function #

func fcntl(fd int32, cmd int32, arg int32) (ret int32, errno int32)

fcntl function #

func fcntl(fd int32, cmd int32, arg int32) (ret int32, errno int32)

fcntl_trampoline function #

func fcntl_trampoline()

fcntl_trampoline function #

func fcntl_trampoline()

fd_write function #

go:wasmimport wasi_snapshot_preview1 fd_write go:noescape

func fd_write(fd int32, iovs unsafe.Pointer, iovsLen size, nwritten *size) errno

fdiv32 function #

func fdiv32(x uint32, y uint32) uint32

fdiv64 function #

func fdiv64(f uint64, g uint64) uint64

feq32 function #

func feq32(x uint32, y uint32) bool

feq64 function #

func feq64(x uint64, y uint64) bool

fge32 function #

func fge32(x uint32, y uint32) bool

fge64 function #

func fge64(x uint64, y uint64) bool

fgt32 function #

func fgt32(x uint32, y uint32) bool

fgt64 function #

func fgt64(x uint64, y uint64) bool

fileLine method #

fileLine returns the file name and line number of the call within the given frame. As a convenience, for the innermost frame, it returns the file and line of the PC this unwinder was started at (often this is a call to another physical function). It returns "?", 0 if something goes wrong.

func (u *inlineUnwinder) fileLine(uf inlineFrame) (file string, line int)

fillAligned function #

fillAligned returns x but with all zeroes in m-aligned groups of m bits set to 1 if any bit in the group is non-zero. For example, fillAligned(0x0100a3, 8) == 0xff00ff. Note that if m == 1, this is a no-op. m must be a power of 2 <= maxPagesPerPhysPage.

func fillAligned(x uint64, m uint) uint64

fillstack function #

func fillstack(stk stack, b byte)

finalizercommit function #

func finalizercommit(gp *g, lock unsafe.Pointer) bool

find method #

find finds the given interface/type pair in t. Returns nil if the given interface/type pair isn't present.

func (t *itabTableType) find(inter *interfacetype, typ *_type) *itab

find method #

find returns the highest chunk index that may contain pages available to scavenge. It also returns an offset to start searching in the highest chunk.

func (s *scavengeIndex) find(force bool) (chunkIdx, uint)

find method #

find searches for npages contiguous free pages in pallocBits and returns the index where that run starts, as well as the index of the first free page it found in the search. searchIdx represents the first known free page and where to begin the next search from. If find fails to find any free space, it returns an index of ^uint(0) and the new searchIdx should be ignored. Note that if npages == 1, the two returned values will always be identical.

func (b *pallocBits) find(npages uintptr, searchIdx uint) (uint, uint)

find method #

find searches for the first (address-ordered) contiguous free region of npages in size and returns a base address for that region. It uses p.searchAddr to prune its search and assumes that no palloc chunks below chunkIndex(p.searchAddr) contain any free memory at all. find also computes and returns a candidate p.searchAddr, which may or may not prune more of the address space than p.searchAddr already does. This candidate is always a valid p.searchAddr. find represents the slow path and the full radix tree search. Returns a base address of 0 on failure, in which case the candidate searchAddr returned is invalid and must be ignored. p.mheapLock must be held.

func (p *pageAlloc) find(npages uintptr) (uintptr, offAddr)

find1 method #

find1 is a helper for find which searches for a single free page in the pallocBits and returns the index. See find for an explanation of the searchIdx parameter.

func (b *pallocBits) find1(searchIdx uint) uint

findAddrGreaterEqual method #

findAddrGreaterEqual returns the smallest address represented by a that is >= addr. Thus, if the address is represented by a, then it returns addr. The second return value indicates whether such an address exists for addr in a. That is, if addr is larger than any address known to a, the second return value will be false.

func (a *addrRanges) findAddrGreaterEqual(addr uintptr) (uintptr, bool)

findBitRange64 function #

findBitRange64 returns the bit index of the first set of n consecutive 1 bits. If no consecutive set of 1 bits of size n may be found in c, then it returns an integer >= 64. n must be > 0.

func findBitRange64(c uint64, n uint) uint

findLargeN method #

findLargeN is a helper for find which searches for npages contiguous free pages in this pallocBits and returns the index where that run starts, as well as the index of the first free page it found in its search. See alloc for an explanation of the searchIdx parameter. Returns a ^uint(0) index on failure and the new searchIdx should be ignored. findLargeN assumes npages > 64, where any such run of free pages crosses at least one aligned 64-bit boundary in the bits.

func (b *pallocBits) findLargeN(npages uintptr, searchIdx uint) (uint, uint)

findMappedAddr method #

findMappedAddr returns the smallest mapped offAddr that is >= addr. That is, if addr refers to mapped memory, then it is returned. If addr is higher than any mapped region, then it returns maxOffAddr. p.mheapLock must be held.

func (p *pageAlloc) findMappedAddr(addr offAddr) offAddr

findObject method #

findObject returns the stack object containing address a, if any. Must have called buildIndex previously.

func (s *stackScanState) findObject(a uintptr) *stackObject

findObject function #

findObject returns the base address for the heap object containing the address p, the object's span, and the index of the object in s. If p does not point into a heap object, it returns base == 0. If p is an invalid heap pointer and debug.invalidptr != 0, findObject panics. refBase and refOff optionally give the base address of the object in which the pointer p was found and the byte offset at which it was found. These are used for error reporting. It is nosplit so it is safe for p to be a pointer to the current goroutine's stack. Since p is a uintptr, it would not be adjusted if the stack were to move. findObject should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic Do not remove or change the type signature. See go.dev/issue/67401. go:linkname findObject go:nosplit

func findObject(p uintptr, refBase uintptr, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr)

findRunnable function #

Finds a runnable goroutine to execute. Tries to steal from other P's, get g from local or global queue, poll network. tryWakeP indicates that the returned goroutine is not normal (GC worker, trace reader) so the caller should try to wake a P.

func findRunnable() (gp *g, inheritTime bool, tryWakeP bool)

findRunnableGCWorker method #

findRunnableGCWorker returns a background mark worker for pp if it should be run. This must only be called when gcBlackenEnabled != 0.

func (c *gcControllerState) findRunnableGCWorker(pp *p, now int64) (*g, int64)

findScavengeCandidate method #

findScavengeCandidate returns a start index and a size for this pallocData segment which represents a contiguous region of free and unscavenged memory. searchIdx indicates the page index within this chunk to start the search, but note that findScavengeCandidate searches backwards through the pallocData. As a result, it will return the highest scavenge candidate in address order. min indicates a hard minimum size and alignment for runs of pages. That is, findScavengeCandidate will not return a region smaller than min pages in size, or that is min pages or greater in size but not aligned to min. min must be a non-zero power of 2 <= maxPagesPerPhysPage. max is a hint for how big of a region is desired. If max >= pallocChunkPages, then findScavengeCandidate effectively returns entire free and unscavenged regions. If max < pallocChunkPages, it may truncate the returned region such that size is max. However, findScavengeCandidate may still return a larger region if, for example, it chooses to preserve huge pages, or if max is not aligned to min (it will round up). That is, even if max is small, the returned size is not guaranteed to be equal to max. max is allowed to be less than min, in which case it is as if max == min.

func (m *pallocData) findScavengeCandidate(searchIdx uint, minimum uintptr, max uintptr) (uint, uint)

findSmallN method #

findSmallN is a helper for find which searches for npages contiguous free pages in this pallocBits and returns the index where that run of contiguous pages starts as well as the index of the first free page it finds in its search. See find for an explanation of the searchIdx parameter. Returns a ^uint(0) index on failure and the new searchIdx should be ignored. findSmallN assumes npages <= 64, where any such contiguous run of pages crosses at most one aligned 64-bit boundary in the bits.

func (b *pallocBits) findSmallN(npages uintptr, searchIdx uint) (uint, uint)

findSucc method #

findSucc returns the first index in a such that addr is less than the base of the addrRange at that index.

func (a *addrRanges) findSucc(addr uintptr) int

findfunc function #

findfunc looks up function metadata for a PC. It is nosplit because it's part of the isgoexception implementation. findfunc should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/phuslu/log Do not remove or change the type signature. See go.dev/issue/67401. go:nosplit go:linkname findfunc

func findfunc(pc uintptr) funcInfo

findmoduledatap function #

findmoduledatap looks up the moduledata for a PC. It is nosplit because it's part of the isgoexception implementation. go:nosplit

func findmoduledatap(pc uintptr) *moduledata

findnull function #

go:nosplit

func findnull(s *byte) int

findnullw function #

func findnullw(s *uint16) int

findsghi function #

func findsghi(gp *g, stk stack) uintptr

finishGCTransition method #

finishGCTransition notifies the limiter that the GC transition is complete and releases ownership of it. It also accumulates STW time in the bucket. now must be the timestamp from the end of the STW pause.

func (l *gcCPULimiterState) finishGCTransition(now int64)

finishInternal method #

finishInternal is an unwinder-internal helper called after the stack has been exhausted. It sets the unwinder to an invalid state and checks that it successfully unwound the entire stack.

func (u *unwinder) finishInternal()

finishsweep_m function #

finishsweep_m ensures that all spans are swept. The world must be stopped. This ensures there are no sweeps in progress. go:nowritebarrier

func finishsweep_m()

finq_callback function #

func finq_callback(fn *funcval, obj unsafe.Pointer, nret uintptr, fint *_type, ot *ptrtype)

fint32to32 function #

func fint32to32(x int32) uint32

fint32to64 function #

func fint32to64(x int32) uint64

fint64to32 function #

func fint64to32(x int64) uint32

fint64to64 function #

func fint64to64(x int64) uint64

fintto32 function #

func fintto32(val int64) (f uint32)

fintto64 function #

func fintto64(val int64) (f uint64)

fips_fatal function #

go:linkname fips_fatal crypto/internal/fips140.fatal

func fips_fatal(s string)

fips_getIndicator function #

go:linkname fips_getIndicator crypto/internal/fips140.getIndicator

func fips_getIndicator() uint8

fips_setIndicator function #

go:linkname fips_setIndicator crypto/internal/fips140.setIndicator

func fips_setIndicator(indicator uint8)

fipstls_runtime_arg0 function #

go:linkname fipstls_runtime_arg0 crypto/internal/boring/fipstls.runtime_arg0

func fipstls_runtime_arg0() string

firstcontinuehandler function #

It seems Windows searches ContinueHandler's list even if ExceptionHandler returns EXCEPTION_CONTINUE_EXECUTION. firstcontinuehandler will stop that search, if exceptionhandler did the same earlier. It is nosplit for the same reason as exceptionhandler. go:nosplit

func firstcontinuehandler(info *exceptionrecord, r *context, gp *g) int32

firstcontinuetramp function #

func firstcontinuetramp()

fixsigcode method #

go:nosplit

func (c *sigctxt) fixsigcode(sig uint32)

fixsigcode method #

go:nosplit

func (c *sigctxt) fixsigcode(sig uint32)

fixsigcode method #

go:nosplit

func (c *sigctxt) fixsigcode(sig uint32)

fixsigcode method #

go:nosplit

func (c *sigctxt) fixsigcode(sig uint32)

fixsigcode method #

go:nosplit

func (c *sigctxt) fixsigcode(sig uint32)

fixsigcode method #

go:nosplit

func (c *sigctxt) fixsigcode(sig uint32)

fixsigcode method #

go:nosplit

func (c *sigctxt) fixsigcode(sig uint32)

fixsigcode method #

go:nosplit

func (c *sigctxt) fixsigcode(sig uint32)

fixsigcode method #

go:nosplit

func (c *sigctxt) fixsigcode(sig uint32)

float64HistOrInit method #

float64HistOrInit tries to pull out an existing float64Histogram from the value, but if none exists, then it allocates one with the given buckets.

func (v *metricValue) float64HistOrInit(buckets []float64) *metricFloat64Histogram

float64Inf function #

func float64Inf() float64

float64NegInf function #

func float64NegInf() float64

float64bits function #

float64bits returns the IEEE 754 binary representation of f.

func float64bits(f float64) uint64

float64frombits function #

float64frombits returns the floating point number corresponding to the IEEE 754 binary representation b.

func float64frombits(b uint64) float64

float64toint64 function #

func float64toint64(d float64) (y uint64)

float64touint32 function #

func float64touint32(a float64) uint32

float64touint64 function #

func float64touint64(d float64) (y uint64)

flush method #

flush puts w.traceBuf on the queue of full buffers. nosplit because it's part of writing an event for an M, which must not have any stack growth. go:nosplit

func (w traceWriter) flush() traceWriter

flush method #

flush empties out unallocated free pages in the given cache into p. Then, it clears the cache, such that empty returns true. p.mheapLock must be held. Must run on the system stack because p.mheapLock must be held. go:systemstack

func (c *pageCache) flush(p *pageAlloc)

flush function #

func flush()

flush method #

Flush the bits that have been written, and add zeros as needed to cover the full object [addr, addr+size).

func (h writeUserArenaHeapBits) flush(s *mspan, addr uintptr, size uintptr)

flushallmcaches function #

flushallmcaches flushes the mcaches of all Ps. The world must be stopped. go:nowritebarrier

func flushallmcaches()

flushmcache function #

flushmcache flushes the mcache of allp[i]. The world must be stopped. go:nowritebarrier

func flushmcache(i int)

fmax function #

func fmax(x F, y F) F

fmax32 function #

func fmax32(x float32, y float32) float32

fmax64 function #

func fmax64(x float64, y float64) float64

fmin function #

func fmin(x F, y F) F

fmin32 function #

func fmin32(x float32, y float32) float32

fmin64 function #

func fmin64(x float64, y float64) float64

fmtNSAsMS function #

fmtNSAsMS nicely formats ns nanoseconds as milliseconds.

func fmtNSAsMS(buf []byte, ns uint64) []byte

fmul32 function #

func fmul32(x uint32, y uint32) uint32

fmul64 function #

func fmul64(f uint64, g uint64) uint64

fneg64 function #

func fneg64(f uint64) uint64

forEachG function #

forEachG calls fn on every G from allgs. forEachG takes a lock to exclude concurrent addition of new Gs.

func forEachG(fn func(gp *g))

forEachGRace function #

forEachGRace calls fn on every G from allgs. forEachGRace avoids locking, but does not exclude addition of new Gs during execution, which may be missed.

func forEachGRace(fn func(gp *g))

forEachP function #

forEachP calls fn(p) for every P p when p reaches a GC safe point. If a P is currently executing code, this will bring the P to a GC safe point and execute fn on that P. If the P is not executing code (it is idle or in a syscall), this will call fn(p) directly while preventing the P from exiting its state. This does not ensure that fn will run on every CPU executing Go code, but it acts as a global memory barrier. GC uses this as a "ragged barrier." The caller must hold worldsema. fn must not refer to any part of the current goroutine's stack, since the GC may move it.

func forEachP(reason waitReason, fn func(*p))

forEachPInternal function #

forEachPInternal calls fn(p) for every P p when p reaches a GC safe point. It is the internal implementation of forEachP. The caller must hold worldsema and either must ensure that a GC is not running (otherwise this may deadlock with the GC trying to preempt this P) or it must leave its goroutine in a preemptible state before it switches to the systemstack. Due to these restrictions, prefer forEachP when possible. go:systemstack

func forEachPInternal(fn func(*p))

forbits function #

func forbits(x F, y F) F

forcegchelper function #

func forcegchelper()

fp method #

func (c *sigctxt) fp() uint32

fp method #

func (c *sigctxt) fp() uint32

fp method #

func (c *sigctxt) fp() uint32

fp method #

func (c *sigctxt) fp() uint32

fpTracebackPCs function #

fpTracebackPCs populates pcBuf with the return addresses for each frame and returns the number of PCs written to pcBuf. The returned PCs correspond to "physical frames" rather than "logical frames"; that is if A is inlined into B, this will return a PC for only B.

func fpTracebackPCs(fp unsafe.Pointer, pcBuf []uintptr) (i int)

fpTracebackPartialExpand function #

fpTracebackPartialExpand records a call stack obtained starting from fp. This function will skip the given number of frames, properly accounting for inlining, and save remaining frames as "physical" return addresses. The consumer should later use CallersFrames or similar to expand inline frames.

func fpTracebackPartialExpand(skip int, fp unsafe.Pointer, pcBuf []uintptr) int

fpack32 function #

func fpack32(sign uint32, mant uint32, exp int, trunc uint32) uint32

fpack64 function #

func fpack64(sign uint64, mant uint64, exp int, trunc uint64) uint64

fpscr method #

func (c *sigctxt) fpscr() uint32

fpscrx method #

func (c *sigctxt) fpscrx() uint32

fpunwindExpand function #

fpunwindExpand expands a call stack from pcBuf into dst, returning the number of PCs written to dst. pcBuf and dst should not overlap. fpunwindExpand checks if pcBuf contains logical frames (which include inlined frames) or physical frames (produced by frame pointer unwinding) using a sentinel value in pcBuf[0]. Logical frames are simply returned without the sentinel. Physical frames are turned into logical frames via inline unwinding and by applying the skip value that's stored in pcBuf[0].

func fpunwindExpand(dst []uintptr, pcBuf []uintptr) int

free method #

free returns the userArena's chunks back to mheap and marks it as defunct. Must be called at most once for any given arena. This operation is not safe to call concurrently with other operations on the same arena.

func (a *userArena) free()

free method #

free frees the range [i, i+n) of pages in the pallocBits.

func (b *pallocBits) free(i uint, n uint)

free method #

free updates sc given that npages was freed in the corresponding chunk.

func (sc *scavChunkData) free(npages uint, newGen uint32)

free method #

free returns a spanSetBlock back to the pool.

func (p *spanSetBlockAlloc) free(block *spanSetBlock)

free method #

func (c *pollCache) free(pd *pollDesc)

free method #

func (f *fixalloc) free(p unsafe.Pointer)

free method #

free updates metadata for chunk at index ci with the fact that a free of npages occurred. free may only run concurrently with find.

func (s *scavengeIndex) free(ci chunkIdx, page uint, npages uint)

free method #

free returns npages worth of memory starting at base back to the page heap. p.mheapLock must be held. Must run on the system stack because p.mheapLock must be held. go:systemstack

func (p *pageAlloc) free(base uintptr, npages uintptr)

free1 method #

free1 frees a single page in the pallocBits at i.

func (b *pallocBits) free1(i uint)

freeAll method #

freeAll frees all the bits of b.

func (b *pallocBits) freeAll()

freeMSpanLocked method #

freeMSpanLocked frees an mspan object. h.lock must be held. freeMSpanLocked must be called on the system stack because its caller holds the heap lock. See mheap for details. Running on the system stack also ensures that we won't switch Ps during this function. See tryAllocMSpan for details. go:systemstack

func (h *mheap) freeMSpanLocked(s *mspan)

freeManual method #

freeManual frees a manually-managed span returned by allocManual. typ must be the same as the spanAllocType passed to the allocManual that allocated s. This must only be called when gcphase == _GCoff. See mSpanState for an explanation. freeManual must be called on the system stack because it acquires the heap lock. See mheap for details. go:systemstack

func (h *mheap) freeManual(s *mspan, typ spanAllocType)

freeSomeWbufs function #

freeSomeWbufs frees some workbufs back to the heap and returns true if it should be called again to free more.

func freeSomeWbufs(preemptible bool) bool

freeSpan method #

Free the span back into the heap.

func (h *mheap) freeSpan(s *mspan)

freeSpanLocked method #

func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType)

freeSpecial function #

freeSpecial performs any cleanup on special s and deallocates it. s must already be unlinked from the specials list.

func freeSpecial(s *special, p unsafe.Pointer, size uintptr)

freeStackSpans function #

freeStackSpans frees unused stack spans at the end of GC.

func freeStackSpans()

freeUserArenaChunk function #

freeUserArenaChunk releases the user arena represented by s back to the runtime. x must be a live pointer within s. The runtime will set the user arena to fault once it's safe (the GC is no longer running) and then once the user arena is no longer referenced by the application, will allow it to be reused.

func freeUserArenaChunk(s *mspan, x unsafe.Pointer)

freemcache function #

freemcache releases resources associated with this mcache and puts the object onto a free list. In some cases there is no way to simply release resources, such as statistics, so donate them to a different mcache (the recipient).

func freemcache(c *mcache)

freezetheworld function #

Similar to stopTheWorld but best-effort and can be called several times. There is no reverse operation, used during crashing. This function must not lock any mutexes.

func freezetheworld()

fs method #

func (c *sigctxt) fs() uint32

fs method #

func (c *sigctxt) fs() uint32

fs method #

func (c *sigctxt) fs() uint32

fs method #

func (c *sigctxt) fs() uint64

fs method #

func (c *sigctxt) fs() uint32

fs method #

func (c *sigctxt) fs() uint64

fs method #

func (c *sigctxt) fs() uint64

fs method #

func (c *sigctxt) fs() uint64

fs method #

func (c *sigctxt) fs() uint64

fs method #

func (c *sigctxt) fs() uint64

fs method #

func (c *sigctxt) fs() uint64

fsub64 function #

func fsub64(f uint64, g uint64) uint64

fuint64to32 function #

func fuint64to32(x uint64) uint32

fuint64to64 function #

func fuint64to64(x uint64) uint64

full function #

full reports whether a send on c would block (that is, the channel is full). It uses a single word-sized read of mutable state, so although the answer is instantaneously true, the correct answer may have changed by the time the calling function receives the return value.

func full(c *hchan) bool

fullSwept method #

fullSwept returns the spanSet which holds swept spans without any free slots for this sweepgen.

func (c *mcentral) fullSwept(sweepgen uint32) *spanSet

fullUnswept method #

fullUnswept returns the spanSet which holds unswept spans without any free slots for this sweepgen.

func (c *mcentral) fullUnswept(sweepgen uint32) *spanSet

funcInfo method #

func (f *_func) funcInfo() funcInfo

funcInfo method #

func (f *Func) funcInfo() funcInfo

funcMaxSPDelta function #

funcMaxSPDelta returns the maximum spdelta at any point in f.

func funcMaxSPDelta(f funcInfo) int32

funcName method #

funcName returns the string at nameOff in the function name table.

func (md *moduledata) funcName(nameOff int32) string

funcNameForPrint function #

funcNameForPrint returns the function name for printing to the user.

func funcNameForPrint(name string) string

funcNamePiecesForPrint function #

funcNamePiecesForPrint returns the function name for printing to the user. It returns three pieces so it doesn't need an allocation for string concatenation.

func funcNamePiecesForPrint(name string) (string, string, string)

funcdata function #

funcdata returns a pointer to the ith funcdata for f. funcdata should be kept in sync with cmd/link:writeFuncs.

func funcdata(f funcInfo, i uint8) unsafe.Pointer

funcfile function #

func funcfile(f funcInfo, fileno int32) string

funcline function #

func funcline(f funcInfo, targetpc uintptr) (file string, line int32)

funcline1 function #

funcline1 should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/phuslu/log Do not remove or change the type signature. See go.dev/issue/67401. go:linkname funcline1

func funcline1(f funcInfo, targetpc uintptr, strict bool) (file string, line int32)

funcname function #

func funcname(f funcInfo) string

funcpkgpath function #

func funcpkgpath(f funcInfo) string

funcspdelta function #

func funcspdelta(f funcInfo, targetpc uintptr) int32

funpack32 function #

func funpack32(f uint32) (sign uint32, mant uint32, exp int, inf bool, nan bool)

funpack64 function #

func funpack64(f uint64) (sign uint64, mant uint64, exp int, inf bool, nan bool)

futex function #

go:noescape

func futex(addr unsafe.Pointer, op int32, val uint32, ts unsafe.Pointer, addr2 unsafe.Pointer, val3 uint32) int32

futexsleep function #

go:nosplit

func futexsleep(addr *uint32, val uint32, ns int64)

futexsleep function #

go:nosplit

func futexsleep(addr *uint32, val uint32, ns int64)

futexsleep function #

Atomically, if(*addr == val) sleep Might be woken up spuriously; that's allowed. Don't sleep longer than ns; ns < 0 means forever. go:nosplit

func futexsleep(addr *uint32, val uint32, ns int64)

futexsleep1 function #

func futexsleep1(addr *uint32, val uint32, ns int64)

futexsleep1 function #

func futexsleep1(addr *uint32, val uint32, ns int64)

futexwakeup function #

go:nosplit

func futexwakeup(addr *uint32, cnt uint32)

futexwakeup function #

go:nosplit

func futexwakeup(addr *uint32, cnt uint32)

futexwakeup function #

If any procs are sleeping on addr, wake up at most cnt. go:nosplit

func futexwakeup(addr *uint32, cnt uint32)

g0_pthread_key_create function #

go:nosplit go:cgo_unsafe_args

func g0_pthread_key_create(k *pthreadkey, destructor uintptr) int32

g0_pthread_setspecific function #

go:nosplit go:cgo_unsafe_args

func g0_pthread_setspecific(k pthreadkey, value uintptr) int32

gFromSP function #

func gFromSP(mp *m, sp uintptr) *g

gbit16 function #

gbit16 reads a 16-bit little-endian binary number from b and returns it with the remaining slice of b. go:nosplit

func gbit16(b []byte) (int, []byte)

gcAssistAlloc function #

gcAssistAlloc performs GC work to make gp's assist debt positive. gp must be the calling user goroutine. This must be called with preemption enabled.

func gcAssistAlloc(gp *g)

gcAssistAlloc1 function #

gcAssistAlloc1 is the part of gcAssistAlloc that runs on the system stack. This is a separate function to make it easier to see that we're not capturing anything from the user stack, since the user stack may move while we're in this function. gcAssistAlloc1 indicates whether this assist completed the mark phase by setting gp.param to non-nil. This can't be communicated on the stack since it may move. go:systemstack

func gcAssistAlloc1(gp *g, scanWork int64)

gcBgMarkPrepare function #

gcBgMarkPrepare sets up state for background marking. Mutator assists must not yet be enabled.

func gcBgMarkPrepare()

gcBgMarkStartWorkers function #

gcBgMarkStartWorkers prepares background mark worker goroutines. These goroutines will not run until the mark phase, but they must be started while the work is not stopped and from a regular G stack. The caller must hold worldsema.

func gcBgMarkStartWorkers()

gcBgMarkWorker function #

func gcBgMarkWorker(ready chan struct{...})

gcComputeStartingStackSize function #

func gcComputeStartingStackSize()

gcControllerCommit function #

gcControllerCommit is gcController.commit, but passes arguments from live (non-test) data. It also updates any consumers of the GC pacing, such as sweep pacing and the background scavenger. Calls gcController.commit. The heap lock must be held, so this must be executed on the system stack. go:systemstack

func gcControllerCommit()

gcDrain function #

gcDrain scans roots and objects in work buffers, blackening grey objects until it is unable to get more work. It may return before GC is done; it's the caller's responsibility to balance work from other Ps. If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt is set. If flags&gcDrainIdle != 0, gcDrain returns when there is other work to do. If flags&gcDrainFractional != 0, gcDrain self-preempts when pollFractionalWorkerExit() returns true. This implies gcDrainNoBlock. If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work credit to gcController.bgScanCredit every gcCreditSlack units of scan work. gcDrain will always return if there is a pending STW or forEachP. Disabling write barriers is necessary to ensure that after we've confirmed that we've drained gcw, that we don't accidentally end up flipping that condition by immediately adding work in the form of a write barrier buffer flush. Don't set nowritebarrierrec because it's safe for some callees to have write barriers enabled. go:nowritebarrier

func gcDrain(gcw *gcWork, flags gcDrainFlags)

gcDrainMarkWorkerDedicated function #

gcDrainMarkWorkerDedicated is a wrapper for gcDrain that exists to better account mark time in profiles.

func gcDrainMarkWorkerDedicated(gcw *gcWork, untilPreempt bool)

gcDrainMarkWorkerFractional function #

gcDrainMarkWorkerFractional is a wrapper for gcDrain that exists to better account mark time in profiles.

func gcDrainMarkWorkerFractional(gcw *gcWork)

gcDrainMarkWorkerIdle function #

gcDrainMarkWorkerIdle is a wrapper for gcDrain that exists to better account mark time in profiles.

func gcDrainMarkWorkerIdle(gcw *gcWork)

gcDrainN function #

gcDrainN blackens grey objects until it has performed roughly scanWork units of scan work or the G is preempted. This is best-effort, so it may perform less work if it fails to get a work buffer. Otherwise, it will perform at least n units of work, but may perform more because scanning is always done in whole object increments. It returns the amount of scan work performed. The caller goroutine must be in a preemptible state (e.g., _Gwaiting) to prevent deadlocks during stack scanning. As a consequence, this must be called on the system stack. go:nowritebarrier go:systemstack

func gcDrainN(gcw *gcWork, scanWork int64) int64

gcDumpObject function #

gcDumpObject dumps the contents of obj for debugging and marks the field at byte offset off in obj.

func gcDumpObject(label string, obj uintptr, off uintptr)

gcFlushBgCredit function #

gcFlushBgCredit flushes scanWork units of background scan work credit. This first satisfies blocked assists on the work.assistQueue and then flushes any remaining credit to gcController.bgScanCredit. Write barriers are disallowed because this is used by gcDrain after it has ensured that all work is drained and this must preserve that condition. go:nowritebarrierrec

func gcFlushBgCredit(scanWork int64)

gcMark function #

gcMark runs the mark (or, for concurrent GC, mark termination) All gcWork caches must be empty. STW is in effect at this point.

func gcMark(startTime int64)

gcMarkDone function #

gcMarkDone transitions the GC from mark to mark termination if all reachable objects have been marked (that is, there are no grey objects and can be no more in the future). Otherwise, it flushes all local work to the global queues where it can be discovered by other workers. This should be called when all local mark work has been drained and there are no remaining workers. Specifically, when work.nwait == work.nproc && !gcMarkWorkAvailable(p) The calling context must be preemptible. Flushing local work is important because idle Ps may have local work queued. This is the only way to make that work visible and drive GC to completion. It is explicitly okay to have write barriers in this function. If it does transition to mark termination, then all reachable objects have been marked, so the write barrier cannot shade any more objects.

func gcMarkDone()

gcMarkRootCheck function #

gcMarkRootCheck checks that all roots have been scanned. It is purely for debugging.

func gcMarkRootCheck()

gcMarkRootPrepare function #

gcMarkRootPrepare queues root scanning jobs (stacks, globals, and some miscellany) and initializes scanning-related state. The world must be stopped.

func gcMarkRootPrepare()

gcMarkTermination function #

World must be stopped and mark assists and background workers must be disabled.

func gcMarkTermination(stw worldStop)

gcMarkTinyAllocs function #

gcMarkTinyAllocs greys all active tiny alloc blocks. The world must be stopped.

func gcMarkTinyAllocs()

gcMarkWorkAvailable function #

gcMarkWorkAvailable reports whether executing a mark worker on p is potentially useful. p may be nil, in which case it only checks the global sources of work.

func gcMarkWorkAvailable(p *p) bool

gcPaceScavenger function #

gcPaceScavenger updates the scavenger's pacing, particularly its rate and RSS goal. For this, it requires the current heapGoal, and the heapGoal for the previous GC cycle. The RSS goal is based on the current heap goal with a small overhead to accommodate non-determinism in the allocator. The pacing is based on scavengePageRate, which applies to both regular and huge pages. See that constant for more information. Must be called whenever GC pacing is updated. mheap_.lock must be held or the world must be stopped.

func gcPaceScavenger(memoryLimit int64, heapGoal uint64, lastHeapGoal uint64)

gcPaceSweeper function #

gcPaceSweeper updates the sweeper's pacing parameters. Must be called whenever the GC's pacing is updated. The world must be stopped, or mheap_.lock must be held.

func gcPaceSweeper(trigger uint64)

gcParkAssist function #

gcParkAssist puts the current goroutine on the assist queue and parks. gcParkAssist reports whether the assist is now satisfied. If it returns false, the caller must retry the assist.

func gcParkAssist() bool

gcParkStrongFromWeak function #

gcParkStrongFromWeak puts the current goroutine on the weak->strong queue and parks.

func gcParkStrongFromWeak() *m

gcResetMarkState function #

gcResetMarkState resets global state prior to marking (concurrent or STW) and resets the stack scan state of all Gs. This is safe to do without the world stopped because any Gs created during or after this will start out in the reset state. gcResetMarkState must be called on the system stack because it acquires the heap lock. See mheap for details. go:systemstack

func gcResetMarkState()

gcStart function #

gcStart starts the GC. It transitions from _GCoff to _GCmark (if debug.gcstoptheworld == 0) or performs all of GC (if debug.gcstoptheworld != 0). This may return without performing this transition in some cases, such as when called on a system stack or with locks held.

func gcStart(trigger gcTrigger)

gcSweep function #

gcSweep must be called on the system stack because it acquires the heap lock. See mheap for details. Returns true if the heap was fully swept by this function. The world must be stopped. go:systemstack

func gcSweep(mode gcMode) bool

gcTestIsReachable function #

gcTestIsReachable performs a GC and returns a bit set where bit i is set if ptrs[i] is reachable.

func gcTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64)

gcTestMoveStackOnNextCall function #

gcTestMoveStackOnNextCall causes the stack to be moved on a call immediately following the call to this. It may not work correctly if any other work appears after this call (such as returning). Typically the following call should be marked go:noinline so it performs a stack check. In rare cases this may not cause the stack to move, specifically if there's a preemption between this call and the next.

func gcTestMoveStackOnNextCall()

gcTestPointerClass function #

gcTestPointerClass returns the category of what p points to, one of: "heap", "stack", "data", "bss", "other". This is useful for checking that a test is doing what it's intended to do. This is nosplit simply to avoid extra pointer shuffling that may complicate a test. go:nosplit

func gcTestPointerClass(p unsafe.Pointer) string

gcWaitOnMark function #

gcWaitOnMark blocks until GC finishes the Nth mark phase. If GC has already completed this mark phase, it returns immediately.

func gcWaitOnMark(n uint32)

gcWakeAllAssists function #

gcWakeAllAssists wakes all currently blocked assists. This is used at the end of a GC cycle. gcBlackenEnabled must be false to prevent new assists from going to sleep after this point.

func gcWakeAllAssists()

gcWakeAllStrongFromWeak function #

gcWakeAllStrongFromWeak wakes all currently blocked weak->strong conversions. This is used at the end of a GC cycle. work.strongFromWeak.block must be false to prevent woken goroutines from immediately going back to sleep.

func gcWakeAllStrongFromWeak()

gcWriteBarrier1 function #

Called from compiled code; declared for vet; do NOT call from Go.

func gcWriteBarrier1()

gcWriteBarrier2 function #

gcWriteBarrier2 should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic Do not remove or change the type signature. See go.dev/issue/67401. go:linkname gcWriteBarrier2

func gcWriteBarrier2()

gcWriteBarrier3 function #

func gcWriteBarrier3()

gcWriteBarrier4 function #

func gcWriteBarrier4()

gcWriteBarrier5 function #

func gcWriteBarrier5()

gcWriteBarrier6 function #

func gcWriteBarrier6()

gcWriteBarrier7 function #

func gcWriteBarrier7()

gcWriteBarrier8 function #

func gcWriteBarrier8()

gcWriteBarrierBP function #

func gcWriteBarrierBP()

gcWriteBarrierBX function #

func gcWriteBarrierBX()

gcWriteBarrierCX function #

Called from compiled code; declared for vet; do NOT call from Go.

func gcWriteBarrierCX()

gcWriteBarrierDX function #

func gcWriteBarrierDX()

gcWriteBarrierR8 function #

func gcWriteBarrierR8()

gcWriteBarrierR9 function #

func gcWriteBarrierR9()

gcWriteBarrierSI function #

func gcWriteBarrierSI()

gcallers function #

func gcallers(gp *g, skip int, pcbuf []uintptr) int

gcd function #

func gcd(a uint32, b uint32) uint32

gcdata method #

gcdata returns the number of bytes that contain pointers, and a ptr/nonptr bitmask covering those bytes. Note that this bitmask might be larger than internal/abi.MaxPtrmaskBytes.

func (r *stackObjectRecord) gcdata() (uintptr, *byte)

gcenable function #

gcenable is called after the bulk of the runtime initialization, just before we're about to start letting user code run. It kicks off the background sweeper goroutine, the background scavenger goroutine, and enables GC.

func gcenable()

gcinit function #

func gcinit()

gcmarknewobject function #

gcmarknewobject marks a newly allocated object black. obj must not contain any non-nil pointers. This is nosplit so it can manipulate a gcWork without preemption. go:nowritebarrier go:nosplit

func gcmarknewobject(span *mspan, obj uintptr)

gcount function #

func gcount() int32

gcstopm function #

Stops the current m for stopTheWorld. Returns when the world is restarted.

func gcstopm()

gdestroy function #

func gdestroy(gp *g)

gdirname function #

gdirname returns the first filename from a buffer of directory entries, and a slice containing the remaining directory entries. If the buffer doesn't start with a valid directory entry, the returned name is nil. go:nosplit

func gdirname(buf []byte) (name []byte, rest []byte)

get method #

go:nosplit

func (b *mSpanStateBox) get() mSpanState

get method #

get returns the value of the i'th bit in the bitmap.

func (b *pageBits) get(i uint) uint

get1 method #

getX returns space in the write barrier buffer to store X pointers. getX will flush the buffer if necessary. Callers should use this as: buf := &getg().m.p.ptr().wbBuf p := buf.get2() p[0], p[1] = old, new ... actual memory write ... The caller must ensure there are no preemption points during the above sequence. There must be no preemption points while buf is in use because it is a per-P resource. There must be no preemption points between the buffer put and the write to memory because this could allow a GC phase change, which could result in missed write barriers. getX must be nowritebarrierrec because write barriers here would corrupt the write barrier buffer. It (and everything it calls, if it called anything) has to be nosplit to avoid scheduling on to a different P and a different buffer. go:nowritebarrierrec go:nosplit

func (b *wbBuf) get1() *[1]uintptr

get2 method #

go:nowritebarrierrec go:nosplit

func (b *wbBuf) get2() *[2]uintptr

getAuxv function #

golang.org/x/sys/cpu uses getAuxv via linkname. Do not remove or change the type signature. (See go.dev/issue/57336.) getAuxv should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/cilium/ebpf Do not remove or change the type signature. See go.dev/issue/67401. go:linkname getAuxv

func getAuxv() []uintptr

getCachedDlogger function #

func getCachedDlogger() *dloggerImpl

getCachedDlogger function #

getCachedDlogger returns a cached dlogger if it can do so efficiently, or nil otherwise. The returned dlogger will be owned.

func getCachedDlogger() *dloggerImpl

getCntxct function #

func getCntxct(physical bool) uint32

getCntxct function #

func getCntxct() uint32

getCntxct function #

func getCntxct(physical bool) uint32

getExtraM function #

Return an M from the extra M list. Returns last == true if the list becomes empty because of this call. Spins waiting for an extra M, so caller must ensure that the list always contains or will soon contain at least one M. go:nosplit

func getExtraM() (mp *m, last bool)

getGCMask function #

getGCMask returns the pointer/nonpointer bitmask for type t. nosplit because it is used during write barriers and must not be preempted. go:nosplit

func getGCMask(t *_type) *byte

getGCMaskOnDemand function #

nosplit because it is used during write barriers and must not be preempted. go:nosplit

func getGCMaskOnDemand(t *_type) *byte

getGodebugEarly function #

getGodebugEarly extracts the environment variable GODEBUG from the environment on Unix-like operating systems and returns it. This function exists to extract GODEBUG early before much of the runtime is initialized.

func getGodebugEarly() string

getHPETTimecounter method #

go:nosplit

func (th *vdsoTimehands) getHPETTimecounter() (uint32, bool)

getHugePageSize function #

func getHugePageSize() uintptr

getLockRank function #

func getLockRank(l *mutex) lockRank

getLockRank function #

func getLockRank(l *mutex) lockRank

getMCache function #

getMCache is a convenience function which tries to obtain an mcache. Returns nil if we're not bootstrapping or we don't have a P. The caller's P must not change, so we must be in a non-preemptible state.

func getMCache(mp *m) *mcache

getOSRev function #

func getOSRev() int

getOrAddWeakHandle function #

Retrieves or creates a weak pointer handle for the object p.

func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr

getPageSize function #

func getPageSize() uintptr

getPageSize function #

func getPageSize() uintptr

getPageSize function #

func getPageSize() uintptr

getPageSize function #

func getPageSize() uintptr

getPageSize function #

func getPageSize() uintptr

getPageSize function #

func getPageSize() uintptr

getPageSize function #

func getPageSize() uintptr

getPageSize function #

func getPageSize() uintptr

getPinnerBits method #

nosplit, because it's called by isPinned, which is nosplit go:nosplit

func (s *mspan) getPinnerBits() *pinnerBits

getPtr method #

Remove and return a potential pointer to a stack object. Returns 0 if there are no more pointers available. This prefers non-conservative pointers so we scan stack objects precisely if there are any non-conservative pointers to them.

func (s *stackScanState) getPtr() (p uintptr, conservative bool)

getRandomData function #

go:wasmimport gojs runtime.getRandomData go:noescape

func getRandomData(r []byte)

getStackMap method #

getStackMap returns the locals and arguments live pointer maps, and stack object list for frame.

func (frame *stkframe) getStackMap(debug bool) (locals bitvector, args bitvector, objs []stackObjectRecord)

getStaticuint64s function #

getStaticuint64s is called by the reflect package to get a pointer to the read-only array. go:linkname getStaticuint64s

func getStaticuint64s() *[256]uint64

getTSCTimecounter method #

go:nosplit

func (th *vdsoTimehands) getTSCTimecounter() uint32

getTimecounter method #

go:nosplit

func (th *vdsoTimehands) getTimecounter() (uint32, bool)

getTimecounter method #

go:nosplit

func (th *vdsoTimehands) getTimecounter() (uint32, bool)

getTimecounter method #

go:nosplit

func (th *vdsoTimehands) getTimecounter() (uint32, bool)

getTimecounter method #

go:nosplit

func (th *vdsoTimehands) getTimecounter() (uint32, bool)

getWeakHandle function #

func getWeakHandle(p unsafe.Pointer) *atomic.Uintptr

getcallerfp function #

getcallerfp returns the frame pointer of the caller of the caller of this function. go:nosplit go:noinline

func getcallerfp() uintptr

getcontext function #

go:nosplit

func getcontext(context *ucontext)

getcontext function #

go:noescape

func getcontext(ctxt unsafe.Pointer)

getcpucap function #

Return the minimum value seen for the zone CPU cap, or 0 if no cap is detected.

func getcpucap() uint64

getegid function #

go:nosplit

func getegid() int32

getempty function #

getempty pops an empty work buffer off the work.empty list, allocating new buffers if none are available. go:nowritebarrier

func getempty() *workbuf

geteuid function #

go:nosplit

func geteuid() int32

getfp function #

getfp returns the frame pointer register of its caller or 0 if not implemented. TODO: Make this a compiler intrinsic

func getfp() uintptr

getfp function #

getfp returns the frame pointer register of its caller or 0 if not implemented. TODO: Make this a compiler intrinsic

func getfp() uintptr

getfp function #

getfp returns the frame pointer register of its caller or 0 if not implemented. TODO: Make this a compiler intrinsic

func getfp() uintptr

getfp function #

getfp returns the frame pointer register of its caller or 0 if not implemented. TODO: Make this a compiler intrinsic

func getfp() uintptr

getfp function #

getfp returns the frame pointer register of its caller or 0 if not implemented. TODO: Make this a compiler intrinsic

func getfp() uintptr

getfp function #

getfp returns the frame pointer register of its caller or 0 if not implemented. TODO: Make this a compiler intrinsic

func getfp() uintptr

getfp function #

getfp returns the frame pointer register of its caller or 0 if not implemented. TODO: Make this a compiler intrinsic

func getfp() uintptr

getfp function #

getfp returns the frame pointer register of its caller or 0 if not implemented. TODO: Make this a compiler intrinsic

func getfp() uintptr

getfp function #

getfp returns the frame pointer register of its caller or 0 if not implemented. TODO: Make this a compiler intrinsic

func getfp() uintptr

getfp function #

getfp returns the frame pointer register of its caller or 0 if not implemented. TODO: Make this a compiler intrinsic

func getfp() uintptr

getfp function #

getfp returns the frame pointer register of its caller or 0 if not implemented. TODO: Make this a compiler intrinsic

func getfp() uintptr

getg function #

getg returns the pointer to the current g. The compiler rewrites calls to this function into instructions that fetch the g directly (from TLS or from the dedicated register).

func getg() *g

getgid function #

go:nosplit

func getgid() int32

getitab function #

getitab should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic Do not remove or change the type signature. See go.dev/issue/67401. go:linkname getitab

func getitab(inter *interfacetype, typ *_type, canfail bool) *itab

getlasterror function #

in sys_windows_386.s and sys_windows_amd64.s:

func getlasterror() uint32

getm function #

A helper function for EnsureDropM. getm should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - fortio.org/log Do not remove or change the type signature. See go.dev/issue/67401. go:linkname getm

func getm() uintptr

getncpu function #

func getncpu() int32

getncpu function #

func getncpu() int32

getncpu function #

func getncpu() int32

getncpu function #

func getncpu() int32

getncpu function #

go:systemstack

func getncpu() int32

getncpu function #

func getncpu() int32

getncpu function #

func getncpu() int32

getpid function #

func getpid() uint64

getpid function #

func getpid() int

getproccount function #

func getproccount() int32

getproccount function #

func getproccount() int32

getproccount function #

func getproccount() int32

getrctl function #

go:nosplit

func getrctl(controlname unsafe.Pointer, oldbuf unsafe.Pointer, newbuf unsafe.Pointer, flags uint32) uintptr

getsig function #

go:nosplit go:nowritebarrierrec

func getsig(i uint32) uintptr

getsig function #

go:nosplit go:nowritebarrierrec

func getsig(i uint32) uintptr

getsig function #

go:nosplit go:nowritebarrierrec

func getsig(i uint32) uintptr

getsig function #

go:nosplit go:nowritebarrierrec

func getsig(i uint32) uintptr

getsig function #

go:nosplit go:nowritebarrierrec

func getsig(i uint32) uintptr

getsig function #

go:nosplit go:nowritebarrierrec

func getsig(i uint32) uintptr

getsig function #

go:nosplit go:nowritebarrierrec

func getsig(i uint32) uintptr

getsig function #

go:nosplit go:nowritebarrierrec

func getsig(i uint32) uintptr

getthrid function #

func getthrid() int32

getthrid function #

go:nosplit go:cgo_unsafe_args

func getthrid() (tid int32)

getthrid_trampoline function #

func getthrid_trampoline()

gettid function #

func gettid() uint32

getuid function #

go:nosplit

func getuid() int32

gfget function #

Get from gfree list. If local list is empty, grab a batch from global list.

func gfget(pp *p) *g

gfpurge function #

Purge all cached G's from gfree list to the global list.

func gfpurge(pp *p)

gfput function #

Put on gfree list. If local list is too long, transfer a batch to the global list.

func gfput(pp *p, gp *g)

globrunqget function #

Try get a batch of G's from the global runnable queue. sched.lock must be held.

func globrunqget(pp *p, max int32) *g

globrunqput function #

Put gp on the global runnable queue. sched.lock must be held. May run during STW, so write barriers are not allowed. go:nowritebarrierrec

func globrunqput(gp *g)

globrunqputbatch function #

Put a batch of runnable goroutines on the global runnable queue. This clears *batch. sched.lock must be held. May run during STW, so write barriers are not allowed. go:nowritebarrierrec

func globrunqputbatch(batch *gQueue, n int32)

globrunqputhead function #

Put gp at the head of the global runnable queue. sched.lock must be held. May run during STW, so write barriers are not allowed. go:nowritebarrierrec

func globrunqputhead(gp *g)

goPanicExtendIndex function #

failures in the comparisons for s[x], 0 <= x < y (y == len(s))

func goPanicExtendIndex(hi int, lo uint, y int)

goPanicExtendIndexU function #

func goPanicExtendIndexU(hi uint, lo uint, y int)

goPanicExtendSlice3Acap function #

func goPanicExtendSlice3Acap(hi int, lo uint, y int)

goPanicExtendSlice3AcapU function #

func goPanicExtendSlice3AcapU(hi uint, lo uint, y int)

goPanicExtendSlice3Alen function #

failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))

func goPanicExtendSlice3Alen(hi int, lo uint, y int)

goPanicExtendSlice3AlenU function #

func goPanicExtendSlice3AlenU(hi uint, lo uint, y int)

goPanicExtendSlice3B function #

failures in the comparisons for s[:x:y], 0 <= x <= y

func goPanicExtendSlice3B(hi int, lo uint, y int)

goPanicExtendSlice3BU function #

func goPanicExtendSlice3BU(hi uint, lo uint, y int)

goPanicExtendSlice3C function #

failures in the comparisons for s[x:y:], 0 <= x <= y

func goPanicExtendSlice3C(hi int, lo uint, y int)

goPanicExtendSlice3CU function #

func goPanicExtendSlice3CU(hi uint, lo uint, y int)

goPanicExtendSliceAcap function #

func goPanicExtendSliceAcap(hi int, lo uint, y int)

goPanicExtendSliceAcapU function #

func goPanicExtendSliceAcapU(hi uint, lo uint, y int)

goPanicExtendSliceAlen function #

failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))

func goPanicExtendSliceAlen(hi int, lo uint, y int)

goPanicExtendSliceAlenU function #

func goPanicExtendSliceAlenU(hi uint, lo uint, y int)

goPanicExtendSliceB function #

failures in the comparisons for s[x:y], 0 <= x <= y

func goPanicExtendSliceB(hi int, lo uint, y int)

goPanicExtendSliceBU function #

func goPanicExtendSliceBU(hi uint, lo uint, y int)

goPanicIndex function #

failures in the comparisons for s[x], 0 <= x < y (y == len(s)) go:yeswritebarrierrec

func goPanicIndex(x int, y int)

goPanicIndexU function #

go:yeswritebarrierrec

func goPanicIndexU(x uint, y int)

goPanicSlice3Acap function #

func goPanicSlice3Acap(x int, y int)

goPanicSlice3AcapU function #

func goPanicSlice3AcapU(x uint, y int)

goPanicSlice3Alen function #

failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))

func goPanicSlice3Alen(x int, y int)

goPanicSlice3AlenU function #

func goPanicSlice3AlenU(x uint, y int)

goPanicSlice3B function #

failures in the comparisons for s[:x:y], 0 <= x <= y

func goPanicSlice3B(x int, y int)

goPanicSlice3BU function #

func goPanicSlice3BU(x uint, y int)

goPanicSlice3C function #

failures in the comparisons for s[x:y:], 0 <= x <= y

func goPanicSlice3C(x int, y int)

goPanicSlice3CU function #

func goPanicSlice3CU(x uint, y int)

goPanicSliceAcap function #

go:yeswritebarrierrec

func goPanicSliceAcap(x int, y int)

goPanicSliceAcapU function #

go:yeswritebarrierrec

func goPanicSliceAcapU(x uint, y int)

goPanicSliceAlen function #

failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s)) go:yeswritebarrierrec

func goPanicSliceAlen(x int, y int)

goPanicSliceAlenU function #

go:yeswritebarrierrec

func goPanicSliceAlenU(x uint, y int)

goPanicSliceB function #

failures in the comparisons for s[x:y], 0 <= x <= y go:yeswritebarrierrec

func goPanicSliceB(x int, y int)

goPanicSliceBU function #

go:yeswritebarrierrec

func goPanicSliceBU(x uint, y int)

goPanicSliceConvert function #

failures in the conversion ([x]T)(s) or (*[x]T)(s), 0 <= x <= y, y == len(s)

func goPanicSliceConvert(x int, y int)

goStatusToTraceGoStatus function #

goStatusToTraceGoStatus translates the internal status to traceGoStatus. status must not be _Gdead or any status whose name has the suffix "_unused." nosplit because it's part of writing an event for an M, which must not have any stack growth. go:nosplit

func goStatusToTraceGoStatus(status uint32, wr waitReason) traceGoStatus

goargs function #

func goargs()

gobytes function #

used by cmd/cgo

func gobytes(p *byte, n int) (b []byte)

godebugNotify function #

func godebugNotify(envChanged bool)

godebug_registerMetric function #

go:linkname godebug_registerMetric internal/godebug.registerMetric

func godebug_registerMetric(name string, read func() uint64)

godebug_setNewIncNonDefault function #

go:linkname godebug_setNewIncNonDefault internal/godebug.setNewIncNonDefault

func godebug_setNewIncNonDefault(newIncNonDefault func(string) func())

godebug_setUpdate function #

go:linkname godebug_setUpdate internal/godebug.setUpdate

func godebug_setUpdate(update func(string, string))

goenvs function #

func goenvs()

goenvs function #

func goenvs()

goenvs function #

func goenvs()

goenvs function #

func goenvs()

goenvs function #

func goenvs()

goenvs function #

func goenvs()

goenvs function #

func goenvs()

goenvs function #

func goenvs()

goenvs function #

func goenvs()

goenvs function #

goenvs caches the Plan 9 environment variables at start of execution into string array envs, to supply the initial contents for os.Environ. Subsequent calls to os.Setenv will change this cache, without writing back to the (possibly shared) Plan 9 environment, so that Setenv and Getenv conform to the same Posix semantics as on other operating systems. For Plan 9 shared environment semantics, instead of Getenv(key) and Setenv(key, value), one can use os.ReadFile("/env/" + key) and os.WriteFile("/env/" + key, value, 0666) respectively. go:nosplit

func goenvs()

goenvs function #

func goenvs()

goenvs function #

func goenvs()

goenvs_unix function #

func goenvs_unix()

goexit function #

goexit is the return stub at the top of every goroutine call stack. Each goroutine stack is constructed as if goexit called the goroutine's entry point function, so that when the entry point function returns, it will return to goexit, which will call goexit1 to perform the actual exit. This function must never be called directly. Call goexit1 instead. gentraceback assumes that goexit terminates the stack. A direct call on the stack will cause gentraceback to stop walking the stack prematurely and if there is leftover state it may panic.

func goexit(neverCallThisFunction)

goexit0 function #

goexit continuation on g0.

func goexit0(gp *g)

goexit1 function #

Finishes execution of the current goroutine.

func goexit1()

goexitsall function #

func goexitsall(status *byte)

gogetenv function #

func gogetenv(key string) string

gogo function #

func gogo(buf *gobuf)

gopanic function #

The implementation of the predeclared function panic. The compiler emits calls to this function. gopanic should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - go.undefinedlabs.com/scopeagent - github.com/goplus/igop Do not remove or change the type signature. See go.dev/issue/67401. go:linkname gopanic

func gopanic(e any)

gopark function #

Puts the current goroutine into a waiting state and calls unlockf on the system stack. If unlockf returns false, the goroutine is resumed. unlockf must not access this G's stack, as it may be moved between the call to gopark and the call to unlockf. Note that because unlockf is called after putting the G into a waiting state, the G may have already been readied by the time unlockf is called unless there is external synchronization preventing the G from being readied. If unlockf returns false, it must guarantee that the G cannot be externally readied. Reason explains why the goroutine has been parked. It is displayed in stack traces and heap dumps. Reasons should be unique and descriptive. Do not re-use reasons, add new ones. gopark should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - gvisor.dev/gvisor - github.com/sagernet/gvisor Do not remove or change the type signature. See go.dev/issue/67401. go:linkname gopark

func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int)

goparkunlock function #

Puts the current goroutine into a waiting state and unlocks the lock. The goroutine can be made runnable again by calling goready(gp).

func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int)

gopreempt_m function #

func gopreempt_m(gp *g)

goready function #

goready should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - gvisor.dev/gvisor - github.com/sagernet/gvisor Do not remove or change the type signature. See go.dev/issue/67401. go:linkname goready

func goready(gp *g, traceskip int)

gorecover function #

The implementation of the predeclared function recover. Cannot split the stack because it needs to reliably find the stack segment of its caller. TODO(rsc): Once we commit to CopyStackAlways, this doesn't need to be nosplit. go:nosplit

func gorecover(argp uintptr) any

goroutineProfileInternal function #

func goroutineProfileInternal(p []profilerecord.StackRecord) (n int, ok bool)

goroutineProfileWithLabels function #

labels may be nil. If labels is non-nil, it must have the same length as p.

func goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool)

goroutineProfileWithLabelsConcurrent function #

func goroutineProfileWithLabelsConcurrent(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool)

goroutineProfileWithLabelsSync function #

func goroutineProfileWithLabelsSync(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool)

goroutineReady function #

Ready the goroutine arg.

func goroutineReady(arg any, _ uintptr, _ int64)

goroutineheader function #

func goroutineheader(gp *g)

goschedIfBusy function #

goschedIfBusy yields the processor like gosched, but only does so if there are no idle Ps or if we're on the only P and there's nothing in the run queue. In both cases, there is freely available idle time. go:nosplit

func goschedIfBusy()

goschedImpl function #

func goschedImpl(gp *g, preempted bool)

gosched_m function #

Gosched continuation on g0.

func gosched_m(gp *g)

goschedguarded function #

goschedguarded yields the processor like gosched, but also checks for forbidden states and opts out of the yield in those cases. go:nosplit

func goschedguarded()

goschedguarded_m function #

goschedguarded is a forbidden-states-avoided version of gosched_m.

func goschedguarded_m(gp *g)

gostartcall function #

adjust Gobuf as if it executed a call to fn with context ctxt and then did an immediate Gosave.

func gostartcall(buf *gobuf, fn unsafe.Pointer, ctxt unsafe.Pointer)

gostartcall function #

adjust Gobuf as if it executed a call to fn with context ctxt and then did an immediate Gosave.

func gostartcall(buf *gobuf, fn unsafe.Pointer, ctxt unsafe.Pointer)

gostartcall function #

adjust Gobuf as if it executed a call to fn with context ctxt and then did an immediate Gosave.

func gostartcall(buf *gobuf, fn unsafe.Pointer, ctxt unsafe.Pointer)

gostartcall function #

adjust Gobuf as if it executed a call to fn with context ctxt and then did an immediate Gosave.

func gostartcall(buf *gobuf, fn unsafe.Pointer, ctxt unsafe.Pointer)

gostartcall function #

adjust Gobuf as if it executed a call to fn with context ctxt and then stopped before the first instruction in fn.

func gostartcall(buf *gobuf, fn unsafe.Pointer, ctxt unsafe.Pointer)

gostartcall function #

adjust Gobuf as if it executed a call to fn with context ctxt and then did an immediate Gosave.

func gostartcall(buf *gobuf, fn unsafe.Pointer, ctxt unsafe.Pointer)

gostartcall function #

adjust Gobuf as if it executed a call to fn with context ctxt and then did an immediate Gosave.

func gostartcall(buf *gobuf, fn unsafe.Pointer, ctxt unsafe.Pointer)

gostartcall function #

adjust Gobuf as if it executed a call to fn with context ctxt and then stopped before the first instruction in fn.

func gostartcall(buf *gobuf, fn unsafe.Pointer, ctxt unsafe.Pointer)

gostartcall function #

adjust Gobuf as if it executed a call to fn with context ctxt and then did an immediate Gosave.

func gostartcall(buf *gobuf, fn unsafe.Pointer, ctxt unsafe.Pointer)

gostartcall function #

adjust Gobuf as if it executed a call to fn with context ctxt and then did an immediate Gosave.

func gostartcall(buf *gobuf, fn unsafe.Pointer, ctxt unsafe.Pointer)

gostartcallfn function #

adjust Gobuf as if it executed a call to fn and then stopped before the first instruction in fn.

func gostartcallfn(gobuf *gobuf, fv *funcval)

gostring function #

This is exported via linkname to assembly in syscall (for Plan9) and cgo. go:linkname gostring

func gostring(p *byte) string

gostringn function #

func gostringn(p *byte, l int) string

gostringnocopy function #

go:nosplit

func gostringnocopy(str *byte) string

gostringw function #

func gostringw(strw *uint16) string

gotraceback function #

gotraceback returns the current traceback settings. If level is 0, suppress all tracebacks. If level is 1, show tracebacks, but exclude runtime frames. If level is 2, show tracebacks including runtime frames. If all is set, print all goroutine stacks. Otherwise, print just the current goroutine. If crash is set, crash (core dump, etc) after tracebacking. go:nosplit

func gotraceback() (level int32, all bool, crash bool)

goyield function #

goyield is like Gosched, but it: - emits a GoPreempt trace event instead of a GoSched trace event - puts the current G on the runq of the current P instead of the globrunq goyield should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - gvisor.dev/gvisor - github.com/sagernet/gvisor Do not remove or change the type signature. See go.dev/issue/67401. go:linkname goyield

func goyield()

goyield_m function #

func goyield_m(gp *g)

gp method #

func (c *sigctxt) gp() uint64

gp method #

func (c *sigctxt) gp() uint64

gp method #

func (c *sigctxt) gp() uint64

greyobject function #

obj is the start of an object with mark mbits. If it isn't already marked, mark it and enqueue into gcw. base and off are for debugging only and could be removed. See also wbBufFlush1, which partially duplicates this logic. go:nowritebarrierrec

func greyobject(obj uintptr, base uintptr, off uintptr, span *mspan, gcw *gcWork, objIndex uintptr)

grow method #

Try to add at least npage pages of memory to the heap, returning how much the heap grew by and whether it worked. h.lock must be held.

func (h *mheap) grow(npage uintptr) (uintptr, bool)

grow method #

grow allocates a new empty span from the heap and initializes it for c's size class.

func (c *mcentral) grow() *mspan

grow method #

grow sets up the metadata for the address range [base, base+size). It may allocate metadata, in which case *p.sysStat will be updated. p.mheapLock must be held.

func (p *pageAlloc) grow(base uintptr, size uintptr)

grow method #

sysGrow updates the index's backing store in response to a heap growth. Returns the amount of memory added to sysStat.

func (s *scavengeIndex) grow(base uintptr, limit uintptr, sysStat *sysMemStat) uintptr

growMemory function #

Implemented in src/runtime/sys_wasm.s

func growMemory(pages int32) int32

growWork function #

func growWork(t *maptype, h *hmap, bucket uintptr)

growWork_fast32 function #

func growWork_fast32(t *maptype, h *hmap, bucket uintptr)

growWork_fast64 function #

func growWork_fast64(t *maptype, h *hmap, bucket uintptr)

growWork_faststr function #

func growWork_faststr(t *maptype, h *hmap, bucket uintptr)

growing method #

growing reports whether h is growing. The growth may be to the same size or bigger.

func (h *hmap) growing() bool

growslice function #

growslice allocates new backing store for a slice. arguments: oldPtr = pointer to the slice's backing array newLen = new length (= oldLen + num) oldCap = original slice's capacity. num = number of elements being added et = element type return values: newPtr = pointer to the new backing store newLen = same value as the argument newCap = capacity of the new backing store Requires that uint(newLen) > uint(oldCap). Assumes the original slice length is newLen - num A new backing store is allocated with space for at least newLen elements. Existing entries [0, oldLen) are copied over to the new backing store. Added entries [oldLen, newLen) are not initialized by growslice (although for pointer-containing element types, they are zeroed). They must be initialized by the caller. Trailing entries [newLen, newCap) are zeroed. growslice's odd calling convention makes the generated code that calls this function simpler. In particular, it accepts and returns the new length so that the old length is not live (does not need to be spilled/restored) and the new length is returned (also does not need to be spilled/restored). growslice should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic - github.com/chenzhuoyu/iasm - github.com/cloudwego/dynamicgo - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname growslice

func growslice(oldPtr unsafe.Pointer, newLen int, oldCap int, num int, et *_type) slice

gs method #

func (c *sigctxt) gs() uint64

gs method #

func (c *sigctxt) gs() uint32

gs method #

func (c *sigctxt) gs() uint64

gs method #

func (c *sigctxt) gs() uint32

gs method #

func (c *sigctxt) gs() uint64

gs method #

func (c *sigctxt) gs() uint64

gs method #

func (c *sigctxt) gs() uint32

gs method #

func (c *sigctxt) gs() uint64

gs method #

func (c *sigctxt) gs() uint64

gs method #

func (c *sigctxt) gs() uint64

gs method #

func (c *sigctxt) gs() uint32

guintptr method #

go:nosplit

func (gp *g) guintptr() guintptr

gwrite function #

write to goroutine-local buffer if diverting output, or else standard error.

func gwrite(b []byte)

handleAsyncEvent function #

func handleAsyncEvent()

handleEvent function #

handleEvent gets invoked on a call from JavaScript into Go. It calls the event handler of the syscall/js package and then parks the handler goroutine to allow other goroutines to run before giving execution back to JavaScript. When no other goroutine is awake any more, beforeIdle resumes the handler goroutine. Now that the same goroutine is running as was running when the call came in from JavaScript, execution can be safely passed back to JavaScript.

func handleEvent()

handoff function #

go:nowritebarrier

func handoff(b *workbuf) *workbuf

handoffp function #

Hands off P from syscall or locked M. Always runs without a P, so write barriers are not allowed. go:nowritebarrierrec

func handoffp(pp *p)

has method #

has returns true if the set contains a given statDep.

func (s *statDepSet) has(d statDep) bool

hasCgoOnStack method #

func (mp *m) hasCgoOnStack() bool

hasOverflow method #

hasOverflow reports whether b has any overflow records pending.

func (b *profBuf) hasOverflow() bool

hashGrow function #

func hashGrow(t *maptype, h *hmap)

hchan method #

hchan returns the channel in t.arg. t must be a timer with a channel.

func (t *timer) hchan() *hchan

heapBits method #

heapBits returns the heap ptr/scalar bits stored at the end of the span for small object spans and heap arena spans. Note that the uintptr of each element means something different for small object spans and for heap arena spans. Small object spans are easy: they're never interpreted as anything but uintptr, so they're immune to differences in endianness. However, the heapBits for user arena spans is exposed through a dummy type descriptor, so the byte ordering needs to match the same byte ordering the compiler would emit. The compiler always emits the bitmap data in little endian byte ordering, so on big endian platforms these uintptrs will have their byte orders swapped from what they normally would be. heapBitsInSpan(span.elemsize) or span.isUserArenaChunk must be true. go:nosplit

func (span *mspan) heapBits() []uintptr

heapBitsInSpan function #

heapBitsInSpan returns true if the size of an object implies its ptr/scalar data is stored at the end of the span, and is accessible via span.heapBits. Note: this works for both rounded-up sizes (span.elemsize) and unrounded type sizes because minSizeForMallocHeader is guaranteed to be at a size class boundary. go:nosplit

func heapBitsInSpan(userSize uintptr) bool

heapBitsSlice function #

Helper for constructing a slice for the span's heap bits. go:nosplit

func heapBitsSlice(spanBase uintptr, spanSize uintptr) []uintptr

heapBitsSmallForAddr method #

heapBitsSmallForAddr loads the heap bits for the object stored at addr from span.heapBits. addr must be the base pointer of an object in the span. heapBitsInSpan(span.elemsize) must be true. go:nosplit

func (span *mspan) heapBitsSmallForAddr(addr uintptr) uintptr

heapGoal method #

heapGoal returns the current heap goal.

func (c *gcControllerState) heapGoal() uint64

heapGoalInternal method #

heapGoalInternal is the implementation of heapGoal which returns additional information that is necessary for computing the trigger. The returned minTrigger is always <= goal.

func (c *gcControllerState) heapGoalInternal() (goal uint64, minTrigger uint64)

heapObjectsCanMove function #

heapObjectsCanMove always returns false in the current garbage collector. It exists for go4.org/unsafe/assume-no-moving-gc, which is an unfortunate idea that had an even more unfortunate implementation. Every time a new Go release happened, the package stopped building, and the authors had to add a new file with a new //go:build line, and then the entire ecosystem of packages with that as a dependency had to explicitly update to the new version. Many packages depend on assume-no-moving-gc transitively, through paths like inet.af/netaddr -> go4.org/intern -> assume-no-moving-gc. This was causing a significant amount of friction around each new release, so we added this bool for the package to //go:linkname instead. The bool is still unfortunate, but it's not as bad as breaking the ecosystem on every new release. If the Go garbage collector ever does move heap objects, we can set this to true to break all the programs using assume-no-moving-gc. go:linkname heapObjectsCanMove

func heapObjectsCanMove() bool

heapRetained function #

heapRetained returns an estimate of the current heap RSS.

func heapRetained() uint64

heapSetTypeLarge function #

func heapSetTypeLarge(x uintptr, dataSize uintptr, typ *_type, span *mspan) uintptr

heapSetTypeNoHeader function #

func heapSetTypeNoHeader(x uintptr, dataSize uintptr, typ *_type, span *mspan) uintptr

heapSetTypeSmallHeader function #

func heapSetTypeSmallHeader(x uintptr, dataSize uintptr, typ *_type, header **_type, span *mspan) uintptr

hex method #

go:nosplit

func (l *dloggerImpl) hex(x uint64) *dloggerImpl

hex method #

go:nosplit

func (l dloggerFake) hex(x uint64) dloggerFake

hexdumpWords function #

hexdumpWords prints a word-oriented hex dump of [p, end). If mark != nil, it will be called with each printed word's address and should return a character mark to appear just before that word's value. It can return 0 to indicate no mark.

func hexdumpWords(p uintptr, end uintptr, mark func(uintptr) byte)

hi method #

func (c *sigctxt) hi() uint64

hi method #

func (c *sigctxt) hi() uint64

hi method #

func (c *sigctxt) hi() uint32

i method #

go:nosplit

func (l *dloggerImpl) i(x int) *dloggerImpl

i method #

go:nosplit

func (l dloggerFake) i(x int) dloggerFake

i16 method #

go:nosplit

func (l dloggerFake) i16(x int16) dloggerFake

i16 method #

go:nosplit

func (l *dloggerImpl) i16(x int16) *dloggerImpl

i32 method #

go:nosplit

func (l dloggerFake) i32(x int32) dloggerFake

i32 method #

go:nosplit

func (l *dloggerImpl) i32(x int32) *dloggerImpl

i64 method #

go:nosplit

func (l dloggerFake) i64(x int64) dloggerFake

i64 method #

go:nosplit

func (l *dloggerImpl) i64(x int64) *dloggerImpl

i8 method #

go:nosplit

func (l dloggerFake) i8(x int8) dloggerFake

i8 method #

go:nosplit

func (l *dloggerImpl) i8(x int8) *dloggerImpl

ifaceHash function #

func ifaceHash(i interface{...}, seed uintptr) uintptr

ifaceeq function #

func ifaceeq(tab *itab, x unsafe.Pointer, y unsafe.Pointer) bool

ignoreSIGSYS function #

go:linkname ignoreSIGSYS os.ignoreSIGSYS

func ignoreSIGSYS()

ignoredNote function #

func ignoredNote(note *byte) bool

inHeapOrStack function #

inHeapOrStack is a variant of inheap that returns true for pointers into any allocated heap span. go:nowritebarrier go:nosplit

func inHeapOrStack(b uintptr) bool

inList method #

func (span *mspan) inList() bool

inPersistentAlloc function #

inPersistentAlloc reports whether p points to memory allocated by persistentalloc. This must be nosplit because it is called by the cgo checker code, which is called by the write barrier code. go:nosplit

func inPersistentAlloc(p uintptr) bool

inRange function #

inRange reports whether either v0 or v1 is in the range [r0, r1].

func inRange(r0 uintptr, r1 uintptr, v0 uintptr, v1 uintptr) bool

inUserArenaChunk function #

inUserArenaChunk returns true if p points to a user arena chunk.

func inUserArenaChunk(p uintptr) bool

inVDSOPage function #

func inVDSOPage(pc uintptr) bool

inVDSOPage function #

inVDSOPage reports whether PC is on the VDSO page. go:nosplit

func inVDSOPage(pc uintptr) bool

incActive method #

incActive increments the active-count for the group. A group does not become durably blocked while the active-count is non-zero.

func (sg *synctestGroup) incActive()

incHead method #

incHead atomically increments the head of a headTailIndex.

func (h *atomicHeadTailIndex) incHead() headTailIndex

incPinCounter method #

incPinCounter is only called for multiple pins of the same object and records the _additional_ pins.

func (span *mspan) incPinCounter(offset uintptr)

incTail method #

incTail atomically increments the tail of a headTailIndex.

func (h *atomicHeadTailIndex) incTail() headTailIndex

incidlelocked function #

func incidlelocked(v int32)

increment method #

increment increases the cycle count by one, wrapping the value at mProfCycleWrap. It clears the flushed flag.

func (c *mProfCycleHolder) increment()

incrementOverflow method #

incrementOverflow records a single overflow at time now. It is racing against a possible takeOverflow in the reader.

func (b *profBuf) incrementOverflow(now int64)

incrnoverflow method #

incrnoverflow increments h.noverflow. noverflow counts the number of overflow buckets. This is used to trigger same-size map growth. See also tooManyOverflowBuckets. To keep hmap small, noverflow is a uint16. When there are few buckets, noverflow is an exact count. When there are many buckets, noverflow is an approximate count.

func (h *hmap) incrnoverflow()

indexNoFloat function #

indexNoFloat is bytealg.IndexString but safe to use in a note handler.

func indexNoFloat(s string, t string) int

inf2one function #

inf2one returns a signed 1 if f is an infinity and a signed 0 otherwise. The sign of the result is the sign of f.

func inf2one(f float64) float64

info method #

info returns the pollInfo corresponding to pd.

func (pd *pollDesc) info() pollInfo

inheap function #

inheap reports whether b is a pointer into a (potentially dead) heap object. It returns false for pointers into mSpanManual spans. Non-preemptible because it is used by write barriers. go:nowritebarrier go:nosplit

func inheap(b uintptr) bool

init method #

Initialize a single central free list.

func (c *mcentral) init(spc spanClass)

init method #

func (c *gcControllerState) init(gcPercent int32, memoryLimit int64)

init method #

Initialize the heap.

func (h *mheap) init()

init method #

init initializes u to start unwinding gp's stack and positions the iterator on gp's innermost frame. gp must not be the current G. A single unwinder can be reused for multiple unwinds.

func (u *unwinder) init(gp *g, flags unwindFlags)

init function #

func init()

init method #

init initializes the scavengeIndex. Returns the amount added to sysStat.

func (s *scavengeIndex) init(test bool, sysStat *sysMemStat) uintptr

init function #

func init()

init method #

Lock ranking an rwmutex has two aspects: Semantic ranking: this rwmutex represents some higher level lock that protects some resource (e.g., allocmLock protects creation of new Ms). The read and write locks of that resource need to be represented in the lock rank. Internal ranking: as an implementation detail, rwmutex uses two mutexes: rLock and wLock. These have lock order requirements: wLock must be locked before rLock. This also needs to be represented in the lock rank. Semantic ranking is represented by acquiring readRank during read lock and writeRank during write lock. wLock is held for the duration of a write lock, so it uses writeRank directly, both for semantic and internal ranking. rLock is only held temporarily inside the rlock/lock methods, so it uses readRankInternal to represent internal ranking. Semantic ranking is represented by a separate acquire of readRank for the duration of a read lock. The lock ranking must document this ordering: - readRankInternal is a leaf lock. - readRank is taken before readRankInternal. - writeRank is taken before readRankInternal. - readRank is placed in the lock order wherever a read lock of this rwmutex belongs. - writeRank is placed in the lock order wherever a write lock of this rwmutex belongs.

func (rw *rwmutex) init(readRank lockRank, readRankInternal lockRank, writeRank lockRank)

init function #

func init()

init function #

func init()

init method #

func (w *gcWork) init()

init method #

func (a *addrRanges) init(sysStat *sysMemStat)

init function #

func init()

init method #

func (p *pageAlloc) init(mheapLock *mutex, sysStat *sysMemStat, test bool)

init function #

func init()

init function #

func init()

init function #

start forcegc helper goroutine

func init()

init method #

init initializes a scavenger state and wires to the current G. Must be called from a regular goroutine that can allocate.

func (s *scavengerState) init()

init method #

init initializes pp, which may be a freshly allocated p or a previously destroyed p, and transitions it to status _Pgcstop.

func (pp *p) init(id int32)

init method #

Initialize a new span with the given start and npages.

func (span *mspan) init(base uintptr, npages uintptr)

init method #

init initializes ticks to maximize the chance that we have a good ticksPerSecond reference. Must not run concurrently with ticksPerSecond.

func (t *ticksType) init()

init method #

func (l *linearAlloc) init(base uintptr, size uintptr, mapMemory bool)

init method #

Initialize f to allocate objects of the given size, using the allocator to obtain chunks of memory.

func (f *fixalloc) init(size uintptr, first func(arg unsafe.Pointer, p unsafe.Pointer), arg unsafe.Pointer, stat *sysMemStat)

init function #

func init()

init function #

func init()

init function #

func init()

init method #

Initialize an empty doubly-linked list.

func (list *mSpanList) init()

init method #

init initializes a newly allocated timer t. Any code that allocates a timer must call t.init before using it. The arg and f can be set during init, or they can be nil in init and set by a future call to t.modify.

func (t *timer) init(f func(arg any, seq uintptr, delay int64), arg any)

initAlgAES function #

func initAlgAES()

initAt method #

func (u *unwinder) initAt(pc0 uintptr, sp0 uintptr, lr0 uintptr, gp *g, flags unwindFlags)

initBloc function #

func initBloc()

initExceptionHandler function #

func initExceptionHandler()

initHPETTimecounter function #

go:systemstack

func initHPETTimecounter(idx int)

initHeap method #

initHeap reestablishes the heap order in the slice ts.heap. It takes O(n) time for n=len(ts.heap), not the O(n log n) of n repeated add operations.

func (ts *timers) initHeap()

initHeapBits method #

initHeapBits initializes the heap bitmap for a span.

func (s *mspan) initHeapBits()

initHighResTimer function #

func initHighResTimer()

initLegacy function #

func initLegacy()

initLogd function #

func initLogd()

initLongPathSupport function #

initLongPathSupport enables long path support.

func initLongPathSupport()

initMetrics function #

initMetrics initializes the metrics map if it hasn't been yet. metricsSema must be held.

func initMetrics()

initOpenCodedDefers method #

func (p *_panic) initOpenCodedDefers(fn funcInfo, varp unsafe.Pointer) bool

initSecureMode function #

func initSecureMode()

initSecureMode function #

func initSecureMode()

initSecureMode function #

func initSecureMode()

initSpan method #

initSpan initializes a blank span s which will represent the range [base, base+npages*pageSize). typ is the type of span being allocated.

func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base uintptr, npages uintptr)

initSysDirectory function #

func initSysDirectory()

initsig function #

func initsig(preinit bool)

initsig function #

func initsig(preinit bool)

initsig function #

func initsig(preinit bool)

initsig function #

Initialize signals. Called by libpreinit so runtime may not be initialized. go:nosplit go:nowritebarrierrec

func initsig(preinit bool)

injectglist function #

injectglist adds each runnable G on the list to some run queue, and clears glist. If there is no current P, they are added to the global queue, and up to npidle M's are started to run them. Otherwise, for each idle P, this adds a G to the global queue and starts an M. Any remaining G's are added to the current P's local run queue. This may temporarily acquire sched.lock. Can run concurrently with GC.

func injectglist(glist *gList)

insert method #

func (list *mSpanList) insert(span *mspan)

insertBack method #

func (list *mSpanList) insertBack(span *mspan)

int32Hash function #

func int32Hash(i uint32, seed uintptr) uintptr

int64Hash function #

func int64Hash(i uint64, seed uintptr) uintptr

int64div function #

func int64div(n int64, d int64) int64

int64mod function #

go:nosplit

func int64mod(n int64, d int64) int64

int64tofloat32 function #

func int64tofloat32(y int64) float32

int64tofloat64 function #

func int64tofloat64(y int64) float64

interequal function #

func interequal(p unsafe.Pointer, q unsafe.Pointer) bool

interfaceSwitch function #

interfaceSwitch compares t against the list of cases in s. If t matches case i, interfaceSwitch returns the case index i and an itab for the pair (case interface type, t). If there is no match, it returns N,nil, where N is the number of cases.

func interfaceSwitch(s *abi.InterfaceSwitch, t *_type) (int, *itab)

interhash function #

func interhash(p unsafe.Pointer, h uintptr) uintptr

internal_cpu_getsysctlbyname function #

go:linkname internal_cpu_getsysctlbyname internal/cpu.getsysctlbyname

func internal_cpu_getsysctlbyname(name []byte) (int32, int32)

internal_cpu_getsystemcfg function #

go:nosplit go:linkname internal_cpu_getsystemcfg internal/cpu.getsystemcfg

func internal_cpu_getsystemcfg(label uint) uint

internal_cpu_sysctlUint64 function #

go:linkname internal_cpu_sysctlUint64 internal/cpu.sysctlUint64

func internal_cpu_sysctlUint64(mib []uint32) (uint64, bool)

internal_sync_fatal function #

go:linkname internal_sync_fatal internal/sync.fatal

func internal_sync_fatal(s string)

internal_sync_nanotime function #

go:linkname internal_sync_nanotime internal/sync.runtime_nanotime

func internal_sync_nanotime() int64

internal_sync_runtime_SemacquireMutex function #

go:linkname internal_sync_runtime_SemacquireMutex internal/sync.runtime_SemacquireMutex

func internal_sync_runtime_SemacquireMutex(addr *uint32, lifo bool, skipframes int)

internal_sync_runtime_Semrelease function #

go:linkname internal_sync_runtime_Semrelease internal/sync.runtime_Semrelease

func internal_sync_runtime_Semrelease(addr *uint32, handoff bool, skipframes int)

internal_sync_runtime_canSpin function #

Active spinning for sync.Mutex. go:linkname internal_sync_runtime_canSpin internal/sync.runtime_canSpin go:nosplit

func internal_sync_runtime_canSpin(i int) bool

internal_sync_runtime_doSpin function #

go:linkname internal_sync_runtime_doSpin internal/sync.runtime_doSpin go:nosplit

func internal_sync_runtime_doSpin()

internal_sync_throw function #

go:linkname internal_sync_throw internal/sync.throw

func internal_sync_throw(s string)

internal_syscall_gostring function #

internal_syscall_gostring is a version of gostring for internal/syscall/unix. go:linkname internal_syscall_gostring internal/syscall/unix.gostring

func internal_syscall_gostring(p *byte) string

internal_weak_runtime_makeStrongFromWeak function #

go:linkname internal_weak_runtime_makeStrongFromWeak weak.runtime_makeStrongFromWeak

func internal_weak_runtime_makeStrongFromWeak(u unsafe.Pointer) unsafe.Pointer

internal_weak_runtime_registerWeakPointer function #

go:linkname internal_weak_runtime_registerWeakPointer weak.runtime_registerWeakPointer

func internal_weak_runtime_registerWeakPointer(p unsafe.Pointer) unsafe.Pointer

intstring function #

func intstring(buf *[4]byte, v int64) (s string)

ip method #

func (c *context) ip() uintptr

ip method #

func (c *sigctxt) ip() uint32

ip method #

func (c *sigctxt) ip() uint32

ip method #

func (c *context) ip() uintptr

ip method #

func (c *context) ip() uintptr

ip method #

func (c *context) ip() uintptr

ip method #

func (c *sigctxt) ip() uint32

ip method #

func (c *sigctxt) ip() uint32

isAbort function #

isAbort returns true, if context r describes exception raised by calling runtime.abort function. go:nosplit

func isAbort(r *context) bool

isAbortPC function #

isAbortPC reports whether pc is the program counter at which runtime.abort raises a signal. It is nosplit because it's part of the isgoexception implementation. go:nosplit

func isAbortPC(pc uintptr) bool

isAsyncSafePoint function #

isAsyncSafePoint reports whether gp at instruction PC is an asynchronous safe point. This indicates that: 1. It's safe to suspend gp and conservatively scan its stack and registers. There are no potentially hidden pointer values and it's not in the middle of an atomic sequence like a write barrier. 2. gp has enough stack space to inject the asyncPreempt call. 3. It's generally safe to interact with the runtime, even if we're in a signal handler stopped here. For example, there are no runtime locks held, so acquiring a runtime lock won't self-deadlock. In some cases the PC is safe for asynchronous preemption but it also needs to adjust the resumption PC. The new PC is returned in the second result.

func isAsyncSafePoint(gp *g, pc uintptr, sp uintptr, lr uintptr) (bool, uintptr)

isDirectIface function #

isDirectIface reports whether t is stored directly in an interface value.

func isDirectIface(t *_type) bool

isDone method #

isDone returns true if all sweep work has been drained and no more outstanding sweepers exist. That is, when the sweep phase is completely done.

func (a *activeSweep) isDone() bool

isEmpty function #

isEmpty reports whether the given tophash array entry represents an empty bucket entry.

func isEmpty(x uint8) bool

isEmpty method #

isEmpty returns true if the hasFree flag is unset.

func (sc *scavChunkFlags) isEmpty() bool

isEmpty method #

func (list *mSpanList) isEmpty() bool

isExportedRuntime function #

isExportedRuntime reports whether name is an exported runtime function. It is only for runtime functions, so ASCII A-Z is fine.

func isExportedRuntime(name string) bool

isFinite function #

isFinite reports whether f is neither NaN nor an infinity.

func isFinite(f float64) bool

isFree method #

isFree reports whether the index'th object in s is unallocated. The caller must ensure s.state is mSpanInUse, and there must have been no preemption points since ensuring this (which could allow a GC transition, which would allow the state to change).

func (s *mspan) isFree(index uintptr) bool

isGC method #

func (r stwReason) isGC() bool

isGoPointerWithoutSpan function #

func isGoPointerWithoutSpan(p unsafe.Pointer) bool

isIdleInSynctest method #

func (w waitReason) isIdleInSynctest() bool

isInf function #

isInf reports whether f is an infinity.

func isInf(f float64) bool

isInlined method #

isInlined returns whether uf is an inlined frame.

func (u *inlineUnwinder) isInlined(uf inlineFrame) bool

isInlined method #

isInlined reports whether f should be re-interpreted as a *funcinl.

func (f *_func) isInlined() bool

isMarked method #

isMarked reports whether mark bit m is set.

func (m markBits) isMarked() bool

isMultiPinned method #

func (v *pinState) isMultiPinned() bool

isMutexWait method #

func (w waitReason) isMutexWait() bool

isNaN function #

isNaN reports whether f is an IEEE 754 “not-a-number” value.

func isNaN(f float64) (is bool)

isPinned method #

nosplit, because it's called by isPinned, which is nosplit go:nosplit

func (v *pinState) isPinned() bool

isPinned function #

isPinned checks if a Go pointer is pinned. nosplit, because it's called from nosplit code in cgocheck. go:nosplit

func isPinned(ptr unsafe.Pointer) bool

isPowerOfTwo function #

func isPowerOfTwo(x uintptr) bool

isSecureMode function #

func isSecureMode() bool

isSecureMode function #

func isSecureMode() bool

isSecureMode function #

func isSecureMode() bool

isSecureMode function #

func isSecureMode() bool

isShrinkStackSafe function #

isShrinkStackSafe returns whether it's safe to attempt to shrink gp's stack. Shrinking the stack is only safe when we have precise pointer maps for all frames on the stack. The caller must hold the _Gscan bit for gp or must be running gp itself.

func isShrinkStackSafe(gp *g) bool

isSweepDone function #

isSweepDone reports whether all spans are swept. Note that this condition may transition from false to true at any time as the sweeper runs. It may transition from true to false if a GC runs; to prevent that the caller must be non-preemptible or must somehow block GC progress.

func isSweepDone() bool

isSystemGoroutine function #

isSystemGoroutine reports whether the goroutine g must be omitted in stack dumps and deadlock detector. This is any goroutine that starts at a runtime.* entry point, except for runtime.main, runtime.handleAsyncEvent (wasm only) and sometimes runtime.runfinq. If fixed is true, any goroutine that can vary between user and system (that is, the finalizer goroutine) is considered a user goroutine.

func isSystemGoroutine(gp *g, fixed bool) bool

isUnusedUserArenaChunk method #

isUnusedUserArenaChunk indicates that the arena chunk has been set to fault and doesn't contain any scannable memory anymore. However, it might still be mSpanInUse as it sits on the quarantine list, since it needs to be swept. This is not safe to execute unless the caller has ownership of the mspan or the world is stopped (preemption is prevented while the relevant state changes). This is really only meant to be used by accounting tests in the runtime to distinguish when a span shouldn't be counted (since mSpanInUse might not be enough).

func (s *mspan) isUnusedUserArenaChunk() bool

isWaitingForSuspendG method #

func (w waitReason) isWaitingForSuspendG() bool

isWakeup function #

func isWakeup(ev *keventt) bool

isWakeup function #

func isWakeup(ev *keventt) bool

isgoexception function #

isgoexception reports whether this exception should be translated into a Go panic or throw. It is nosplit to avoid growing the stack in case we're aborting because of a stack overflow. go:nosplit

func isgoexception(info *exceptionrecord, r *context) bool

issetugid function #

func issetugid() int32

issetugid function #

func issetugid() int32

issetugid function #

func issetugid() int32

issetugid function #

go:nosplit go:cgo_unsafe_args

func issetugid() (ret int32)

issetugid function #

func issetugid() int32

issetugid function #

func issetugid() int32

issetugid function #

func issetugid() int32

issetugid_trampoline function #

func issetugid_trampoline()

issetugid_trampoline function #

func issetugid_trampoline()

isvalidaddr function #

isvalidaddr checks whether the address has shadow memory (i.e. is in the heap or data/bss). go:nosplit

func isvalidaddr(addr unsafe.Pointer) bool

itabAdd function #

itabAdd adds the given itab to the itab hash table. itabLock must be held.

func itabAdd(m *itab)

itabHashFunc function #

func itabHashFunc(inter *interfacetype, typ *_type) uintptr

itabInit function #

itabInit fills in the m.Fun array with all the code pointers for the m.Inter/m.Type pair. If the type does not implement the interface, it sets m.Fun[0] to 0 and returns the name of an interface function that is missing. If !firstTime, itabInit will not write anything to m.Fun (see issue 65962). It is ok to call this multiple times on the same m, even concurrently (although it will only be called once with firstTime==true).

func itabInit(m *itab, firstTime bool) string

itab_callback function #

func itab_callback(tab *itab)

itabsinit function #

func itabsinit()

iterate_finq function #

go:nowritebarrier

func iterate_finq(callback func(*funcval, unsafe.Pointer, uintptr, *_type, *ptrtype))

iterate_itabs function #

func iterate_itabs(fn func(*itab))

iterate_memprof function #

func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr))

itoa function #

itoa converts val to a decimal representation. The result is written somewhere within buf and the location of the result is returned. buf must be at least 20 bytes. go:nosplit

func itoa(buf []byte, val uint64) []byte

itoaDiv function #

itoaDiv formats val/(10**dec) into buf.

func itoaDiv(buf []byte, val uint64, dec int) []byte

kevent function #

go:noescape

func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32

kevent function #

go:nosplit go:cgo_unsafe_args

func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32

kevent function #

go:noescape

func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32

kevent function #

go:nosplit go:cgo_unsafe_args

func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32

kevent function #

go:noescape

func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32

kevent function #

go:noescape

func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32

kevent_trampoline function #

func kevent_trampoline()

kevent_trampoline function #

func kevent_trampoline()

key32 function #

We use the uintptr mutex.key and note.key as a uint32. go:nosplit

func key32(p *uintptr) *uint32

key8 function #

go:nosplit

func key8(p *uintptr) *uint8

keys function #

keys for implementing maps.keys go:linkname keys maps.keys

func keys(m any, p unsafe.Pointer)

keys method #

func (b *bmap) keys() unsafe.Pointer

keys function #

keys for implementing maps.keys go:linkname keys maps.keys

func keys(m any, p unsafe.Pointer)

kqueue function #

func kqueue() int32

kqueue function #

go:nosplit go:cgo_unsafe_args

func kqueue() int32

kqueue function #

func kqueue() int32

kqueue function #

func kqueue() int32

kqueue function #

go:nosplit go:cgo_unsafe_args

func kqueue() int32

kqueue function #

func kqueue() int32

kqueue_trampoline function #

func kqueue_trampoline()

kqueue_trampoline function #

func kqueue_trampoline()

l1 method #

l1 returns the index into the first level of (*pageAlloc).chunks.

func (i chunkIdx) l1() uint

l1 method #

l1 returns the "l1" portion of an arenaIdx. Marked nosplit because it's called by spanOf and other nosplit functions. go:nosplit

func (i arenaIdx) l1() uint

l2 method #

l2 returns the "l2" portion of an arenaIdx. Marked nosplit because it's called by spanOf and other nosplit funcs. functions. go:nosplit

func (i arenaIdx) l2() uint

l2 method #

l2 returns the index into the second level of (*pageAlloc).chunks.

func (i chunkIdx) l2() uint

lastcontinuehandler function #

lastcontinuehandler is reached, because runtime cannot handle current exception. lastcontinuehandler will print crash info and exit. It is nosplit for the same reason as exceptionhandler. go:nosplit

func lastcontinuehandler(info *exceptionrecord, r *context, gp *g) int32

lastcontinuetramp function #

func lastcontinuetramp()

layout method #

func (s *mspan) layout() (size uintptr, n uintptr, total uintptr)

legacy_fastrand function #

go:linkname legacy_fastrand runtime.fastrand

func legacy_fastrand() uint32

legacy_fastrand64 function #

go:linkname legacy_fastrand64 runtime.fastrand64

func legacy_fastrand64() uint64

legacy_fastrandn function #

go:linkname legacy_fastrandn runtime.fastrandn

func legacy_fastrandn(n uint32) uint32

less function #

less checks if a < b, considering a & b running counts that may overflow the 32-bit range, and that their "unwrapped" difference is always less than 2^31.

func less(a uint32, b uint32) bool

lessEqual method #

lessEqual returns true if l1 is less than or equal to l2 in the offset address space.

func (l1 offAddr) lessEqual(l2 offAddr) bool

lessThan method #

lessThan returns true if l1 is less than l2 in the offset address space.

func (l1 offAddr) lessThan(l2 offAddr) bool

levelIndexToOffAddr function #

levelIndexToOffAddr converts an index into summary[level] into the corresponding address in the offset address space.

func levelIndexToOffAddr(level int, idx int) offAddr

lfnodeValidate function #

lfnodeValidate panics if node is not a valid address for use with lfstack.push. This only needs to be called when node is allocated.

func lfnodeValidate(node *lfnode)

lfstackPack function #

func lfstackPack(node *lfnode, cnt uintptr) uint64

lfstackUnpack function #

func lfstackUnpack(val uint64) *lfnode

libcCall function #

Call fn with arg as its argument. Return what fn returns. fn is the raw pc value of the entry point of the desired function. Switches to the system stack, if not already there. Preserves the calling point as the location where a profiler traceback will begin. go:nosplit

func libcCall(fn unsafe.Pointer, arg unsafe.Pointer) int32

libfuzzerCall4 function #

func libfuzzerCall4(fn *byte, fakePC uintptr, s1 unsafe.Pointer, s2 unsafe.Pointer, result uintptr)

libfuzzerCallTraceIntCmp function #

func libfuzzerCallTraceIntCmp(fn *byte, arg0 uintptr, arg1 uintptr, fakePC uintptr)

libfuzzerCallWithTwoByteBuffers function #

func libfuzzerCallWithTwoByteBuffers(fn *byte, start *byte, end *byte)

libfuzzerHookEqualFold function #

This function now has the same implementation as libfuzzerHookStrCmp because we lack better checks for case-insensitive string equality in the runtime package. go:nosplit

func libfuzzerHookEqualFold(s1 string, s2 string, fakePC int)

libfuzzerHookStrCmp function #

We call libFuzzer's __sanitizer_weak_hook_strcmp function which takes the following four arguments: 1. caller_pc: location of string comparison call site 2. s1: first string used in the comparison 3. s2: second string used in the comparison 4. result: an integer representing the comparison result. 0 indicates equality (comparison will be ignored by libfuzzer), non-zero indicates a difference (comparison will be taken into consideration). go:nosplit

func libfuzzerHookStrCmp(s1 string, s2 string, fakePC int)

libfuzzerTraceCmp1 function #

In libFuzzer mode, the compiler inserts calls to libfuzzerTraceCmpN and libfuzzerTraceConstCmpN (where N can be 1, 2, 4, or 8) for encountered integer comparisons in the code to be instrumented. This may result in these functions having callers that are nosplit. That is why they must be nosplit. go:nosplit

func libfuzzerTraceCmp1(arg0 uint8, arg1 uint8, fakePC uint)

libfuzzerTraceCmp2 function #

go:nosplit

func libfuzzerTraceCmp2(arg0 uint16, arg1 uint16, fakePC uint)

libfuzzerTraceCmp4 function #

go:nosplit

func libfuzzerTraceCmp4(arg0 uint32, arg1 uint32, fakePC uint)

libfuzzerTraceCmp8 function #

go:nosplit

func libfuzzerTraceCmp8(arg0 uint64, arg1 uint64, fakePC uint)

libfuzzerTraceConstCmp1 function #

go:nosplit

func libfuzzerTraceConstCmp1(arg0 uint8, arg1 uint8, fakePC uint)

libfuzzerTraceConstCmp2 function #

go:nosplit

func libfuzzerTraceConstCmp2(arg0 uint16, arg1 uint16, fakePC uint)

libfuzzerTraceConstCmp4 function #

go:nosplit

func libfuzzerTraceConstCmp4(arg0 uint32, arg1 uint32, fakePC uint)

libfuzzerTraceConstCmp8 function #

go:nosplit

func libfuzzerTraceConstCmp8(arg0 uint64, arg1 uint64, fakePC uint)

libpreinit function #

Called to do synchronous initialization of Go code built with -buildmode=c-archive or -buildmode=c-shared. None of the Go runtime is initialized. go:nosplit go:nowritebarrierrec

func libpreinit()

libpreinit function #

Called to do synchronous initialization of Go code built with -buildmode=c-archive or -buildmode=c-shared. None of the Go runtime is initialized. go:nosplit go:nowritebarrierrec

func libpreinit()

libpreinit function #

Called to do synchronous initialization of Go code built with -buildmode=c-archive or -buildmode=c-shared. None of the Go runtime is initialized. go:nosplit go:nowritebarrierrec

func libpreinit()

libpreinit function #

Called to do synchronous initialization of Go code built with -buildmode=c-archive or -buildmode=c-shared. None of the Go runtime is initialized. go:nosplit go:nowritebarrierrec

func libpreinit()

limiting method #

limiting returns true if the CPU limiter is currently enabled, meaning the Go GC should take action to limit CPU utilization. It is safe to call concurrently with other operations.

func (l *gcCPULimiterState) limiting() bool

lo method #

func (c *sigctxt) lo() uint32

lo method #

func (c *sigctxt) lo() uint64

lo method #

func (c *sigctxt) lo() uint64

load method #

func (s *sweepClass) load() sweepClass

load method #

func (x *profAtomic) load() profIndex

load method #

load atomically reads the value of the stat. Must be nosplit as it is called in runtime initialization, e.g. newosproc0. go:nosplit

func (s *sysMemStat) load() uint64

load method #

load atomically reads a headTailIndex value.

func (h *atomicHeadTailIndex) load() headTailIndex

load method #

load loads and unpacks a scavChunkData.

func (sc *atomicScavChunkData) load() scavChunkData

loadOptionalSyscalls function #

func loadOptionalSyscalls()

load_g function #

Called from assembly only; declared for go vet.

func load_g()

load_g function #

Called from assembly only; declared for go vet.

func load_g()

load_g function #

Called from assembly only; declared for go vet.

func load_g()

load_g function #

Called from assembly only; declared for go vet.

func load_g()

load_g function #

Called from assembly only; declared for go vet.

func load_g()

load_g function #

Called from assembly only; declared for go vet.

func load_g()

load_g function #

Called from assembly only; declared for go vet.

func load_g()

load_g function #

func load_g()

lock function #

func lock(l *mutex)

lock function #

func lock(l *mutex)

lock function #

func lock(l *mutex)

lock function #

func lock(l *mutex)

lock method #

lock locks rw for writing.

func (rw *rwmutex) lock()

lock method #

func (ts *timers) lock()

lock method #

lock locks the timer, allowing reading or writing any of the timer fields.

func (t *timer) lock()

lock function #

func lock(l *mutex)

lock2 function #

func lock2(l *mutex)

lock2 function #

func lock2(l *mutex)

lock2 function #

func lock2(l *mutex)

lock2 function #

func lock2(l *mutex)

lock2 function #

func lock2(l *mutex)

lockInit function #

lockInit(l *mutex, rank int) sets the rank of lock before it is used. If there is no clear place to initialize a lock, then the rank of a lock can be specified during the lock call itself via lockWithRank(l *mutex, rank int).

func lockInit(l *mutex, rank lockRank)

lockInit function #

func lockInit(l *mutex, rank lockRank)

lockOSThread function #

go:nosplit

func lockOSThread()

lockRankMayQueueFinalizer function #

lockRankMayQueueFinalizer records the lock ranking effects of a function that may call queuefinalizer.

func lockRankMayQueueFinalizer()

lockRankMayTraceFlush function #

lockRankMayTraceFlush records the lock ranking effects of a potential call to traceFlush. nosplit because traceAcquire is nosplit. go:nosplit

func lockRankMayTraceFlush()

lockVerifyMSize function #

func lockVerifyMSize()

lockVerifyMSize function #

func lockVerifyMSize()

lockVerifyMSize function #

func lockVerifyMSize()

lockVerifyMSize function #

func lockVerifyMSize()

lockVerifyMSize function #

lockVerifyMSize confirms that we can recreate the low bits of the M pointer.

func lockVerifyMSize()

lockWithRank function #

func lockWithRank(l *mutex, rank lockRank)

lockWithRank function #

lockWithRank is like lock(l), but allows the caller to specify a lock rank when acquiring a non-static lock. Note that we need to be careful about stack splits: This function is not nosplit, thus it may split at function entry. This may introduce a new edge in the lock order, but it is no different from any other (nosplit) call before this call (including the call to lock() itself). However, we switch to the systemstack to record the lock held to ensure that we record an accurate lock ordering. e.g., without systemstack, a stack split on entry to lock2() would record stack split locks as taken after l, even though l is not actually locked yet.

func lockWithRank(l *mutex, rank lockRank)

lockWithRankMayAcquire function #

nosplit because it may be called from nosplit contexts. go:nosplit

func lockWithRankMayAcquire(l *mutex, rank lockRank)

lockWithRankMayAcquire function #

This function may be called in nosplit context and thus must be nosplit. go:nosplit

func lockWithRankMayAcquire(l *mutex, rank lockRank)

lockedOSThread function #

func lockedOSThread() bool

lockextra function #

lockextra locks the extra list and returns the list head. The caller must unlock the list by storing a new list head to extram. If nilokay is true, then lockextra will return a nil list head if that's what it finds. If nilokay is false, lockextra will keep waiting until the list head is no longer nil. go:nosplit

func lockextra(nilokay bool) *m

lookup method #

lookup returns &s[idx].

func (s spanSetSpinePointer) lookup(idx uintptr) *atomic.Pointer[spanSetBlock]

lowerASCII function #

func lowerASCII(c byte) byte

lr method #

func (c *sigctxt) lr() uint64

lr method #

func (c *sigctxt) lr() uint32

lr method #

AMD64 does not have link register, so this returns 0.

func (c *context) lr() uintptr

lr method #

func (c *sigctxt) lr() uintptr

lr method #

func (c *sigctxt) lr() uint64

lr method #

func (c *sigctxt) lr() uintptr

lr method #

386 does not have link register, so this returns 0.

func (c *context) lr() uintptr

lr method #

func (c *sigctxt) lr() uint64

lr method #

func (c *sigctxt) lr() uint32

lr method #

func (c *sigctxt) lr() uintptr

lr method #

func (c *sigctxt) lr() uint32

lr method #

func (c *context) lr() uintptr

lr method #

func (c *sigctxt) lr() uint32

lr method #

func (c *sigctxt) lr() uint64

lr method #

func (c *sigctxt) lr() uint64

lr method #

func (c *context) lr() uintptr

lwp_create function #

go:noescape

func lwp_create(ctxt unsafe.Pointer, flags uintptr, lwpid unsafe.Pointer) int32

lwp_create function #

go:noescape

func lwp_create(param *lwpparams) int32

lwp_gettid function #

func lwp_gettid() int32

lwp_kill function #

func lwp_kill(tid int32, sig int)

lwp_kill function #

func lwp_kill(pid int32, tid int32, sig int)

lwp_mcontext_init function #

func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintptr)

lwp_mcontext_init function #

func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintptr)

lwp_mcontext_init function #

func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintptr)

lwp_mcontext_init function #

func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintptr)

lwp_park function #

go:noescape

func lwp_park(clockid int32, flags int32, ts *timespec, unpark int32, hint unsafe.Pointer, unparkhint unsafe.Pointer) int32

lwp_self function #

func lwp_self() int32

lwp_start function #

func lwp_start(uintptr)

lwp_tramp function #

func lwp_tramp()

lwp_unpark function #

go:noescape

func lwp_unpark(lwp int32, hint unsafe.Pointer) int32

mPark function #

mPark causes a thread to park itself, returning once woken. go:nosplit

func mPark()

mProfStackInit function #

mProfStackInit is used to eagerly initialize stack trace buffers for profiling. Lazy allocation would have to deal with reentrancy issues in malloc and runtime locks for mLockProfile. TODO(mknyszek): Implement lazy allocation if this becomes a problem.

func mProfStackInit(mp *m)

mProf_Flush function #

mProf_Flush flushes the events from the current heap profiling cycle into the active profile. After this it is safe to start a new heap profiling cycle with mProf_NextCycle. This is called by GC after mark termination starts the world. In contrast with mProf_NextCycle, this is somewhat expensive, but safe to do concurrently.

func mProf_Flush()

mProf_FlushLocked function #

mProf_FlushLocked flushes the events from the heap profiling cycle at index into the active profile. The caller must hold the lock for the active profile (profMemActiveLock) and for the profiling cycle at index (profMemFutureLock[index]).

func mProf_FlushLocked(index uint32)

mProf_Free function #

Called when freeing a profiled block.

func mProf_Free(b *bucket, size uintptr)

mProf_Malloc function #

Called by malloc to record a profiled block.

func mProf_Malloc(mp *m, p unsafe.Pointer, size uintptr)

mProf_NextCycle function #

mProf_NextCycle publishes the next heap profile cycle and creates a fresh heap profile cycle. This operation is fast and can be done during STW. The caller must call mProf_Flush before calling mProf_NextCycle again. This is called by mark termination during STW so allocations and frees after the world is started again count towards a new heap profiling cycle.

func mProf_NextCycle()

mProf_PostSweep function #

mProf_PostSweep records that all sweep frees for this GC cycle have completed. This has the effect of publishing the heap profile snapshot as of the last mark termination without advancing the heap profile cycle.

func mProf_PostSweep()

mReserveID function #

mReserveID returns the next ID to use for a new m. This new m is immediately considered 'running' by checkdead. sched.lock must be held.

func mReserveID() int64

mStackIsSystemAllocated function #

mStackIsSystemAllocated indicates whether this runtime starts on a system-allocated stack.

func mStackIsSystemAllocated() bool

mach_vm_region function #

mach_vm_region is used to obtain virtual memory mappings for use by the profiling system and is only exported to runtime/pprof. It is restricted to obtaining mappings for the current process. go:linkname mach_vm_region runtime/pprof.mach_vm_region

func mach_vm_region(address *uint64, region_size *uint64, info unsafe.Pointer) int32

mach_vm_region_trampoline function #

func mach_vm_region_trampoline()

madvise function #

go:nosplit go:cgo_unsafe_args

func madvise(addr unsafe.Pointer, n uintptr, flags int32)

madvise function #

return value is only set on linux to be used in osinit().

func madvise(addr unsafe.Pointer, n uintptr, flags int32) int32

madvise function #

go:nosplit

func madvise(addr unsafe.Pointer, n uintptr, flags int32)

madvise function #

go:nosplit

func madvise(addr unsafe.Pointer, n uintptr, flags int32)

madvise function #

return value is only set on linux to be used in osinit().

func madvise(addr unsafe.Pointer, n uintptr, flags int32) int32

madvise function #

go:nosplit go:cgo_unsafe_args

func madvise(addr unsafe.Pointer, n uintptr, flags int32)

madvise_trampoline function #

func madvise_trampoline()

madvise_trampoline function #

func madvise_trampoline()

main function #

The main goroutine.

func main()

main_main function #

go:linkname main_main main.main

func main_main()

makeAddrRange function #

makeAddrRange creates a new address range from two virtual addresses. Throws if the base and limit are not in the same memory segment.

func makeAddrRange(base uintptr, limit uintptr) addrRange

makeArg method #

makeArg converts pd to an interface{}. makeArg does not do any allocation. Normally, such a conversion requires an allocation because pointers to types which embed internal/runtime/sys.NotInHeap (which pollDesc is) must be stored in interfaces indirectly. See issue 42076.

func (pd *pollDesc) makeArg() (i any)

makeBucketArray function #

makeBucketArray initializes a backing array for map buckets. 1<<b is the minimum number of buckets to allocate. dirtyalloc should either be nil or a bucket array previously allocated by makeBucketArray with the same t and b parameters; if it is non-nil it will be cleared and reused as the backing array.

func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap)

makeHeadTailIndex function #

makeHeadTailIndex creates a headTailIndex value from a separate head and tail.

func makeHeadTailIndex(head uint32, tail uint32) headTailIndex

makeLimiterEventStamp function #

makeLimiterEventStamp creates a new stamp from the event type and the current timestamp.

func makeLimiterEventStamp(typ limiterEventType, now int64) limiterEventStamp

makeProfStack function #

makeProfStack returns a buffer large enough to hold a maximum-sized stack trace.

func makeProfStack() []uintptr

makeProfStackFP function #

makeProfStackFP creates a buffer large enough to hold a maximum-sized stack trace as well as any additional frames needed for frame pointer unwinding with delayed inline expansion.

func makeProfStackFP() []uintptr

makeSpanClass function #

func makeSpanClass(sizeclass uint8, noscan bool) spanClass

makeStatDepSet function #

makeStatDepSet creates a new statDepSet from a list of statDeps.

func makeStatDepSet(deps ...statDep) statDepSet

makeTraceFrame function #

makeTraceFrame sets up a traceFrame for a frame.

func makeTraceFrame(gen uintptr, f Frame) traceFrame

makeTraceFrames function #

makeTraceFrames returns the frames corresponding to pcs. It may allocate and may emit trace events.

func makeTraceFrames(gen uintptr, pcs []uintptr) []traceFrame

makechan function #

func makechan(t *chantype, size int) *hchan

makechan64 function #

func makechan64(t *chantype, size int64) *hchan

makeheapobjbv function #

func makeheapobjbv(p uintptr, size uintptr) bitvector

makemap function #

makemap implements Go map creation for make(map[k]v, hint). If the compiler has determined that the map or the first bucket can be created on the stack, h and/or bucket may be non-nil. If h != nil, the map can be created directly in h. If h.buckets != nil, bucket pointed to can be used as the first bucket. makemap should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname makemap

func makemap(t *maptype, hint int, h *hmap) *hmap

makemap function #

makemap implements Go map creation for make(map[k]v, hint). If the compiler has determined that the map or the first group can be created on the stack, m and optionally m.dirPtr may be non-nil. If m != nil, the map can be created directly in m. If m.dirPtr != nil, it points to a group usable for a small map. makemap should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname makemap

func makemap(t *abi.SwissMapType, hint int, m *maps.Map) *maps.Map

makemap64 function #

func makemap64(t *abi.SwissMapType, hint int64, m *maps.Map) *maps.Map

makemap64 function #

func makemap64(t *maptype, hint int64, h *hmap) *hmap

makemap_small function #

makemap_small implements Go map creation for make(map[k]v) and make(map[k]v, hint) when hint is known to be at most abi.SwissMapGroupSlots at compile time and the map needs to be allocated on the heap. makemap_small should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic Do not remove or change the type signature. See go.dev/issue/67401. go:linkname makemap_small

func makemap_small() *maps.Map

makemap_small function #

makemap_small implements Go map creation for make(map[k]v) and make(map[k]v, hint) when hint is known to be at most bucketCnt at compile time and the map needs to be allocated on the heap. makemap_small should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic Do not remove or change the type signature. See go.dev/issue/67401. go:linkname makemap_small

func makemap_small() *hmap

makeslice function #

makeslice should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic Do not remove or change the type signature. See go.dev/issue/67401. go:linkname makeslice

func makeslice(et *_type, len int, cap int) unsafe.Pointer

makeslice64 function #

func makeslice64(et *_type, len64 int64, cap64 int64) unsafe.Pointer

makeslicecopy function #

makeslicecopy allocates a slice of "tolen" elements of type "et", then copies "fromlen" elements of type "et" into that new allocation from "from".

func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer

malg function #

Allocate a new g, with a stack big enough for stacksize bytes.

func malg(stacksize int32) *g

malloc function #

go:nosplit

func malloc(size uintptr) unsafe.Pointer

mallocgc function #

Allocate an object of size bytes. Small objects are allocated from the per-P cache's free lists. Large objects (> 32 kB) are allocated straight from the heap. mallocgc should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/gopkg - github.com/bytedance/sonic - github.com/cloudwego/frugal - github.com/cockroachdb/cockroach - github.com/cockroachdb/pebble - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname mallocgc

func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer

mallocgcLarge function #

func mallocgcLarge(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr)

mallocgcSmallNoscan function #

func mallocgcSmallNoscan(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr)

mallocgcSmallScanHeader function #

func mallocgcSmallScanHeader(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr)

mallocgcSmallScanNoHeader function #

func mallocgcSmallScanNoHeader(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr)

mallocgcTiny function #

func mallocgcTiny(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr)

mallocinit function #

func mallocinit()

manual method #

manual returns true if the span allocation is manually managed.

func (s spanAllocType) manual() bool

mapIterNext function #

mapIterNext performs the next step of iteration. Afterwards, the next key/elem are in it.Key()/it.Elem().

func mapIterNext(it *maps.Iter)

mapIterStart function #

mapIterStart initializes the Iter struct used for ranging over maps and performs the first step of iteration. The Iter struct pointed to by 'it' is allocated on the stack by the compilers order pass or on the heap by reflect. Both need to have zeroed it since the struct contains pointers.

func mapIterStart(t *abi.SwissMapType, m *maps.Map, it *maps.Iter)

mapKeyError function #

func mapKeyError(t *maptype, p unsafe.Pointer) error

mapKeyError2 function #

func mapKeyError2(t *_type, p unsafe.Pointer) error

mapaccess1 function #

mapaccess1 returns a pointer to h[key]. Never returns nil, instead it will return a reference to the zero object for the elem type if the key is not in the map. NOTE: The returned pointer may keep the whole map live, so don't hold onto it for very long.

func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer

mapaccess1 function #

mapaccess1 returns a pointer to h[key]. Never returns nil, instead it will return a reference to the zero object for the elem type if the key is not in the map. NOTE: The returned pointer may keep the whole map live, so don't hold onto it for very long. mapaccess1 is pushed from internal/runtime/maps. We could just call it, but we want to avoid one layer of call. go:linkname mapaccess1

func mapaccess1(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer

mapaccess1_fast32 function #

func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer

mapaccess1_fast32 function #

go:linkname mapaccess1_fast32

func mapaccess1_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) unsafe.Pointer

mapaccess1_fast64 function #

func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer

mapaccess1_fast64 function #

go:linkname mapaccess1_fast64

func mapaccess1_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) unsafe.Pointer

mapaccess1_faststr function #

go:linkname mapaccess1_faststr

func mapaccess1_faststr(t *abi.SwissMapType, m *maps.Map, ky string) unsafe.Pointer

mapaccess1_faststr function #

func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer

mapaccess1_fat function #

func mapaccess1_fat(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer, zero unsafe.Pointer) unsafe.Pointer

mapaccess1_fat function #

func mapaccess1_fat(t *maptype, h *hmap, key unsafe.Pointer, zero unsafe.Pointer) unsafe.Pointer

mapaccess2 function #

mapaccess2 should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname mapaccess2

func mapaccess2(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) (unsafe.Pointer, bool)

mapaccess2 function #

mapaccess2 should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname mapaccess2

func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)

mapaccess2_fast32 function #

mapaccess2_fast32 should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname mapaccess2_fast32

func mapaccess2_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) (unsafe.Pointer, bool)

mapaccess2_fast32 function #

mapaccess2_fast32 should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname mapaccess2_fast32

func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool)

mapaccess2_fast64 function #

mapaccess2_fast64 should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname mapaccess2_fast64

func mapaccess2_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) (unsafe.Pointer, bool)

mapaccess2_fast64 function #

mapaccess2_fast64 should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname mapaccess2_fast64

func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool)

mapaccess2_faststr function #

mapaccess2_faststr should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname mapaccess2_faststr

func mapaccess2_faststr(t *abi.SwissMapType, m *maps.Map, ky string) (unsafe.Pointer, bool)

mapaccess2_faststr function #

mapaccess2_faststr should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname mapaccess2_faststr

func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool)

mapaccess2_fat function #

func mapaccess2_fat(t *maptype, h *hmap, key unsafe.Pointer, zero unsafe.Pointer) (unsafe.Pointer, bool)

mapaccess2_fat function #

func mapaccess2_fat(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer, zero unsafe.Pointer) (unsafe.Pointer, bool)

mapaccessK function #

returns both key and elem. Used by map iterator.

func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer)

mapassign function #

Like mapaccess, but allocates a slot for the key if it is not present in the map. mapassign should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic - github.com/RomiChan/protobuf - github.com/segmentio/encoding - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname mapassign

func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer

mapassign function #

mapassign is pushed from internal/runtime/maps. We could just call it, but we want to avoid one layer of call. mapassign should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic - github.com/RomiChan/protobuf - github.com/segmentio/encoding - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname mapassign

func mapassign(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer

mapassign_fast32 function #

mapassign_fast32 should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname mapassign_fast32

func mapassign_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) unsafe.Pointer

mapassign_fast32 function #

mapassign_fast32 should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname mapassign_fast32

func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer

mapassign_fast32ptr function #

mapassign_fast32ptr should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname mapassign_fast32ptr

func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer

mapassign_fast32ptr function #

mapassign_fast32ptr should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname mapassign_fast32ptr

func mapassign_fast32ptr(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer

mapassign_fast64 function #

mapassign_fast64 should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname mapassign_fast64

func mapassign_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) unsafe.Pointer

mapassign_fast64 function #

mapassign_fast64 should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname mapassign_fast64

func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer

mapassign_fast64ptr function #

mapassign_fast64ptr should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname mapassign_fast64ptr

func mapassign_fast64ptr(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer

mapassign_fast64ptr function #

mapassign_fast64ptr should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname mapassign_fast64ptr

func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer

mapassign_faststr function #

mapassign_faststr should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname mapassign_faststr

func mapassign_faststr(t *abi.SwissMapType, m *maps.Map, s string) unsafe.Pointer

mapassign_faststr function #

mapassign_faststr should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname mapassign_faststr

func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer

mapclear function #

mapclear deletes all keys from a map. It is called by the compiler.

func mapclear(t *maptype, h *hmap)

mapclear function #

mapclear deletes all keys from a map.

func mapclear(t *abi.SwissMapType, m *maps.Map)

mapclone function #

mapclone for implementing maps.Clone go:linkname mapclone maps.clone

func mapclone(m any) any

mapclone function #

mapclone for implementing maps.Clone go:linkname mapclone maps.clone

func mapclone(m any) any

mapclone2 function #

func mapclone2(t *maptype, src *hmap) *hmap

mapclone2 function #

func mapclone2(t *abi.SwissMapType, src *maps.Map) *maps.Map

mapdelete function #

mapdelete should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname mapdelete

func mapdelete(t *maptype, h *hmap, key unsafe.Pointer)

mapdelete function #

mapdelete should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname mapdelete

func mapdelete(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer)

mapdelete_fast32 function #

func mapdelete_fast32(t *maptype, h *hmap, key uint32)

mapdelete_fast32 function #

go:linkname mapdelete_fast32

func mapdelete_fast32(t *abi.SwissMapType, m *maps.Map, key uint32)

mapdelete_fast64 function #

go:linkname mapdelete_fast64

func mapdelete_fast64(t *abi.SwissMapType, m *maps.Map, key uint64)

mapdelete_fast64 function #

func mapdelete_fast64(t *maptype, h *hmap, key uint64)

mapdelete_faststr function #

func mapdelete_faststr(t *maptype, h *hmap, ky string)

mapdelete_faststr function #

go:linkname mapdelete_faststr

func mapdelete_faststr(t *abi.SwissMapType, m *maps.Map, ky string)

mapinitnoop function #

mapinitnoop is a no-op function known to the Go linker; if a given global map (of the right size) is determined to be dead, the linker will rewrite the relocation (from the package init func) from the outlined map init function to this symbol. Defined in assembly so as to avoid complications with instrumentation (coverage, etc).

func mapinitnoop()

mapinitnoop function #

mapinitnoop is a no-op function known to the Go linker; if a given global map (of the right size) is determined to be dead, the linker will rewrite the relocation (from the package init func) from the outlined map init function to this symbol. Defined in assembly so as to avoid complications with instrumentation (coverage, etc).

func mapinitnoop()

mapiterinit function #

mapiterinit initializes the hiter struct used for ranging over maps. The hiter struct pointed to by 'it' is allocated on the stack by the compilers order pass or on the heap by reflect_mapiterinit. Both need to have zeroed hiter since the struct contains pointers. mapiterinit should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic - github.com/goccy/go-json - github.com/RomiChan/protobuf - github.com/segmentio/encoding - github.com/ugorji/go/codec - github.com/wI2L/jettison Do not remove or change the type signature. See go.dev/issue/67401. go:linkname mapiterinit

func mapiterinit(t *maptype, h *hmap, it *hiter)

mapiterinit function #

mapiterinit is a compatibility wrapper for map iterator for users of //go:linkname from before Go 1.24. It is not used by Go itself. New users should use reflect or the maps package. mapiterinit should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic - github.com/goccy/go-json - github.com/RomiChan/protobuf - github.com/segmentio/encoding - github.com/ugorji/go/codec - github.com/wI2L/jettison Do not remove or change the type signature. See go.dev/issue/67401. go:linkname mapiterinit

func mapiterinit(t *abi.SwissMapType, m *maps.Map, it *linknameIter)

mapiternext function #

mapiternext should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic - github.com/RomiChan/protobuf - github.com/segmentio/encoding - github.com/ugorji/go/codec - gonum.org/v1/gonum Do not remove or change the type signature. See go.dev/issue/67401. go:linkname mapiternext

func mapiternext(it *hiter)

mapiternext function #

mapiternext is a compatibility wrapper for map iterator for users of //go:linkname from before Go 1.24. It is not used by Go itself. New users should use reflect or the maps package. mapiternext should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic - github.com/RomiChan/protobuf - github.com/segmentio/encoding - github.com/ugorji/go/codec - gonum.org/v1/gonum Do not remove or change the type signature. See go.dev/issue/67401. go:linkname mapiternext

func mapiternext(it *linknameIter)

maps_fatal function #

go:linkname maps_fatal internal/runtime/maps.fatal

func maps_fatal(s string)

maps_mapKeyError function #

go:linkname maps_mapKeyError internal/runtime/maps.mapKeyError

func maps_mapKeyError(t *abi.SwissMapType, p unsafe.Pointer) error

maps_newarray function #

go:linkname maps_newarray internal/runtime/maps.newarray

func maps_newarray(typ *_type, n int) unsafe.Pointer

maps_newobject function #

go:linkname maps_newobject internal/runtime/maps.newobject

func maps_newobject(typ *_type) unsafe.Pointer

maps_rand function #

go:linkname maps_rand internal/runtime/maps.rand

func maps_rand() uint64

maps_typedmemclr function #

go:linkname maps_typedmemclr internal/runtime/maps.typedmemclr

func maps_typedmemclr(typ *_type, ptr unsafe.Pointer)

maps_typedmemmove function #

go:linkname maps_typedmemmove internal/runtime/maps.typedmemmove

func maps_typedmemmove(typ *_type, dst unsafe.Pointer, src unsafe.Pointer)

markBitsForAddr function #

func markBitsForAddr(p uintptr) markBits

markBitsForBase method #

func (s *mspan) markBitsForBase() markBits

markBitsForIndex method #

func (s *mspan) markBitsForIndex(objIndex uintptr) markBits

markBitsForSpan function #

markBitsForSpan returns the markBits for the span base address base.

func markBitsForSpan(base uintptr) (mbits markBits)

markDrained method #

markDrained marks the active sweep cycle as having drained all remaining work. This is safe to be called concurrently with all other methods of activeSweep, though may race. Returns true if this call was the one that actually performed the mark.

func (a *activeSweep) markDrained() bool

markWorkerStop method #

markWorkerStop must be called whenever a mark worker stops executing. It updates mark work accounting in the controller by a duration of work in nanoseconds and other bookkeeping. Safe to execute at any time.

func (c *gcControllerState) markWorkerStop(mode gcMarkWorkerMode, duration int64)

markroot function #

markroot scans the i'th root. Preemption must be disabled (because this uses a gcWork). Returns the amount of GC work credit produced by the operation. If flushBgCredit is true, then that credit is also flushed to the background credit pool. nowritebarrier is only advisory here. go:nowritebarrier

func markroot(gcw *gcWork, i uint32, flushBgCredit bool) int64

markrootBlock function #

markrootBlock scans the shard'th shard of the block of memory [b0, b0+n0), with the given pointer mask. Returns the amount of work done. go:nowritebarrier

func markrootBlock(b0 uintptr, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) int64

markrootFreeGStacks function #

markrootFreeGStacks frees stacks of dead Gs. This does not free stacks of dead Gs cached on Ps, but having a few cached stacks around isn't a problem.

func markrootFreeGStacks()

markrootSpans function #

markrootSpans marks roots for one shard of markArenas. go:nowritebarrier

func markrootSpans(gcw *gcWork, shard int)

max method #

max extracts the max value from a packed sum.

func (p pallocSum) max() uint

maxSearchAddr function #

maxSearchAddr returns the maximum searchAddr value, which indicates that the heap has no free space. This function exists just to make it clear that this is the maximum address for the page allocator's search space. See maxOffAddr for details. It's a function (rather than a variable) because it needs to be usable before package runtime's dynamic initialization is complete. See #51913 for details.

func maxSearchAddr() offAddr

mayMoreStackMove function #

mayMoreStackMove is a maymorestack hook that forces stack movement at every possible point. See mayMoreStackPreempt. go:nosplit go:linkname mayMoreStackMove

func mayMoreStackMove()

mayMoreStackPreempt function #

mayMoreStackPreempt is a maymorestack hook that forces a preemption at every possible cooperative preemption point. This is valuable to apply to the runtime, which can be sensitive to preemption points. To apply this to all preemption points in the runtime and runtime-like code, use the following in bash or zsh: X=(-{gc,asm}flags={runtime/...,reflect,sync}=-d=maymorestack=runtime.mayMoreStackPreempt) GOFLAGS=${X[@]} This must be deeply nosplit because it is called from a function prologue before the stack is set up and because the compiler will call it from any splittable prologue (leading to infinite recursion). Ideally it should also use very little stack because the linker doesn't currently account for this in nosplit stack depth checking. Ensure mayMoreStackPreempt can be called for all ABIs. go:nosplit go:linkname mayMoreStackPreempt

func mayMoreStackPreempt()

maybeAdd method #

maybeAdd adds t to the local timers heap if it needs to be in a heap. The caller must not hold t's lock nor any timers heap lock. The caller probably just unlocked t, but that lock must be dropped in order to acquire a ts.lock, to avoid lock inversions. (timers.adjust holds ts.lock while acquiring each t's lock, so we cannot hold any t's lock while acquiring ts.lock). Strictly speaking it *might* be okay to hold t.lock and acquire ts.lock at the same time, because we know that t is not in any ts.heap, so nothing holding a ts.lock would be acquiring the t.lock at the same time, meaning there isn't a possible deadlock. But it is easier and safer not to be too clever and respect the static ordering. (If we don't, we have to change the static lock checking of t and ts.) Concurrent calls to time.Timer.Reset or blockTimerChan may result in concurrent calls to t.maybeAdd, so we cannot assume that t is not in a heap on entry to t.maybeAdd.

func (t *timer) maybeAdd()

maybeRunAsync method #

maybeRunAsync checks whether t needs to be triggered and runs it if so. The caller is responsible for locking the timer and for checking that we are running timers in async mode. If the timer needs to be run, maybeRunAsync will unlock and re-lock it. The timer is always locked on return.

func (t *timer) maybeRunAsync()

maybeRunChan method #

maybeRunChan checks whether the timer needs to run to send a value to its associated channel. If so, it does. The timer must not be locked.

func (t *timer) maybeRunChan()

maybeWakeLocked method #

maybeWakeLocked returns a g to wake if the group is durably blocked.

func (sg *synctestGroup) maybeWakeLocked() *g

mcall function #

mcall switches from the g to the g0 stack and invokes fn(g), where g is the goroutine that made the call. mcall saves g's current PC/SP in g->sched so that it can be restored later. It is up to fn to arrange for that later execution, typically by recording g in a data structure, causing something to call ready(g) later. mcall returns to the original goroutine g later, when g has been rescheduled. fn must not return at all; typically it ends by calling schedule, to let the m run other goroutines. mcall can only be called from g stacks (not g0, not gsignal). This must NOT be go:noescape: if fn is a stack-allocated closure, fn puts g on a run queue, and g executes before fn returns, the closure will be invalidated while it is still executing.

func mcall(fn func(*g))

mcommoninit function #

Pre-allocated ID may be passed as 'id', or omitted by passing -1.

func mcommoninit(mp *m, id int64)

mcount function #

func mcount() int32

mdestroy function #

Called from mexit, but not from dropm, to undo the effect of thread-owned resources in minit, semacreate, or elsewhere. Do not take locks after calling this. This always runs without a P, so //go:nowritebarrierrec is required. go:nowritebarrierrec

func mdestroy(mp *m)

mdestroy function #

Called from mexit, but not from dropm, to undo the effect of thread-owned resources in minit, semacreate, or elsewhere. Do not take locks after calling this. This always runs without a P, so //go:nowritebarrierrec is required. go:nowritebarrierrec

func mdestroy(mp *m)

mdestroy function #

Called from mexit, but not from dropm, to undo the effect of thread-owned resources in minit, semacreate, or elsewhere. Do not take locks after calling this. This always runs without a P, so //go:nowritebarrierrec is required. go:nowritebarrierrec

func mdestroy(mp *m)

mdestroy function #

Called from mexit, but not from dropm, to undo the effect of thread-owned resources in minit, semacreate, or elsewhere. Do not take locks after calling this. This always runs without a P, so //go:nowritebarrierrec is required. go:nowritebarrierrec

func mdestroy(mp *m)

mdestroy function #

Called from mexit, but not from dropm, to undo the effect of thread-owned resources in minit, semacreate, or elsewhere. Do not take locks after calling this.

func mdestroy(mp *m)

mdestroy function #

Called from mexit, but not from dropm, to undo the effect of thread-owned resources in minit, semacreate, or elsewhere. Do not take locks after calling this. This always runs without a P, so //go:nowritebarrierrec is required. go:nowritebarrierrec

func mdestroy(mp *m)

mdestroy function #

Called from mexit, but not from dropm, to undo the effect of thread-owned resources in minit, semacreate, or elsewhere. Do not take locks after calling this. This always runs without a P, so //go:nowritebarrierrec is required. go:nowritebarrierrec

func mdestroy(mp *m)

mdestroy function #

Called from mexit, but not from dropm, to undo the effect of thread-owned resources in minit, semacreate, or elsewhere. Do not take locks after calling this. This always runs without a P, so //go:nowritebarrierrec is required. go:nowritebarrierrec go:nosplit

func mdestroy(mp *m)

mdestroy function #

Called from mexit, but not from dropm, to undo the effect of thread-owned resources in minit, semacreate, or elsewhere. Do not take locks after calling this. This always runs without a P, so //go:nowritebarrierrec is required. go:nowritebarrierrec

func mdestroy(mp *m)

mdestroy function #

Called from mexit, but not from dropm, to undo the effect of thread-owned resources in minit, semacreate, or elsewhere. Do not take locks after calling this. This always runs without a P, so //go:nowritebarrierrec is required. go:nowritebarrierrec

func mdestroy(mp *m)

mdestroy function #

Called from mexit, but not from dropm, to undo the effect of thread-owned resources in minit, semacreate, or elsewhere. Do not take locks after calling this.

func mdestroy(mp *m)

mdump function #

func mdump(m *MemStats)

memAlloc function #

func memAlloc(n uintptr) unsafe.Pointer

memAllocNoGrow function #

func memAllocNoGrow(n uintptr) unsafe.Pointer

memCheck function #

func memCheck()

memFree function #

func memFree(ap unsafe.Pointer, n uintptr)

memProfileInternal function #

memProfileInternal returns the number of records n in the profile. If there are less than size records, copyFn is invoked for each record, and ok returns true. The linker set disableMemoryProfiling to true to disable memory profiling if this function is not reachable. Mark it noinline to ensure the symbol exists. (This function is big and normally not inlined anyway.) See also disableMemoryProfiling above and cmd/link/internal/ld/lib.go:linksetup. go:noinline

func memProfileInternal(size int, inuseZero bool, copyFn func(profilerecord.MemProfileRecord)) (n int, ok bool)

memRound function #

func memRound(p uintptr) uintptr

memclrHasPointers function #

memclrHasPointers clears n bytes of typed memory starting at ptr. The caller must ensure that the type of the object at ptr has pointers, usually by checking typ.PtrBytes. However, ptr does not have to point to the start of the allocation. memclrHasPointers should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic Do not remove or change the type signature. See go.dev/issue/67401. go:linkname memclrHasPointers go:nosplit

func memclrHasPointers(ptr unsafe.Pointer, n uintptr)

memclrNoHeapPointers function #

memclrNoHeapPointers clears n bytes starting at ptr. Usually you should use typedmemclr. memclrNoHeapPointers should be used only when the caller knows that *ptr contains no heap pointers because either: *ptr is initialized memory and its type is pointer-free, or *ptr is uninitialized memory (e.g., memory that's being reused for a new allocation) and hence contains only "junk". memclrNoHeapPointers ensures that if ptr is pointer-aligned, and n is a multiple of the pointer size, then any pointer-aligned, pointer-sized portion is cleared atomically. Despite the function name, this is necessary because this function is the underlying implementation of typedmemclr and memclrHasPointers. See the doc of memmove for more details. The (CPU-specific) implementations of this function are in memclr_*.s. memclrNoHeapPointers should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic - github.com/chenzhuoyu/iasm - github.com/dgraph-io/ristretto - github.com/outcaste-io/ristretto Do not remove or change the type signature. See go.dev/issue/67401. go:linkname memclrNoHeapPointers go:noescape

func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)

memclrNoHeapPointersChunked function #

memclrNoHeapPointersChunked repeatedly calls memclrNoHeapPointers on chunks of the buffer to be zeroed, with opportunities for preemption along the way. memclrNoHeapPointers contains no safepoints and also cannot be preemptively scheduled, so this provides a still-efficient block copy that can also be preempted on a reasonable granularity. Use this with care; if the data being cleared is tagged to contain pointers, this allows the GC to run before it is all cleared.

func memclrNoHeapPointersChunked(size uintptr, x unsafe.Pointer)

memequal function #

in internal/bytealg/equal_*.s memequal should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic Do not remove or change the type signature. See go.dev/issue/67401. go:linkname memequal go:noescape

func memequal(a unsafe.Pointer, b unsafe.Pointer, size uintptr) bool

memequal0 function #

func memequal0(p unsafe.Pointer, q unsafe.Pointer) bool

memequal128 function #

func memequal128(p unsafe.Pointer, q unsafe.Pointer) bool

memequal16 function #

func memequal16(p unsafe.Pointer, q unsafe.Pointer) bool

memequal32 function #

func memequal32(p unsafe.Pointer, q unsafe.Pointer) bool

memequal64 function #

func memequal64(p unsafe.Pointer, q unsafe.Pointer) bool

memequal8 function #

func memequal8(p unsafe.Pointer, q unsafe.Pointer) bool

memequal_varlen function #

func memequal_varlen(a unsafe.Pointer, b unsafe.Pointer) bool

memhash function #

memhash should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/aacfactory/fns - github.com/dgraph-io/ristretto - github.com/minio/simdjson-go - github.com/nbd-wtf/go-nostr - github.com/outcaste-io/ristretto - github.com/puzpuzpuz/xsync/v2 - github.com/puzpuzpuz/xsync/v3 - github.com/authzed/spicedb - github.com/pingcap/badger Do not remove or change the type signature. See go.dev/issue/67401. go:linkname memhash

func memhash(p unsafe.Pointer, h uintptr, s uintptr) uintptr

memhash0 function #

func memhash0(p unsafe.Pointer, h uintptr) uintptr

memhash128 function #

func memhash128(p unsafe.Pointer, h uintptr) uintptr

memhash16 function #

func memhash16(p unsafe.Pointer, h uintptr) uintptr

memhash32 function #

func memhash32(p unsafe.Pointer, h uintptr) uintptr

memhash32Fallback function #

func memhash32Fallback(p unsafe.Pointer, seed uintptr) uintptr

memhash32Fallback function #

func memhash32Fallback(p unsafe.Pointer, seed uintptr) uintptr

memhash64 function #

func memhash64(p unsafe.Pointer, h uintptr) uintptr

memhash64Fallback function #

func memhash64Fallback(p unsafe.Pointer, seed uintptr) uintptr

memhash64Fallback function #

func memhash64Fallback(p unsafe.Pointer, seed uintptr) uintptr

memhash8 function #

func memhash8(p unsafe.Pointer, h uintptr) uintptr

memhashFallback function #

func memhashFallback(p unsafe.Pointer, seed uintptr, s uintptr) uintptr

memhashFallback function #

func memhashFallback(p unsafe.Pointer, seed uintptr, s uintptr) uintptr

memhash_varlen function #

go:nosplit

func memhash_varlen(p unsafe.Pointer, h uintptr) uintptr

memmove function #

memmove copies n bytes from "from" to "to". memmove ensures that any pointer in "from" is written to "to" with an indivisible write, so that racy reads cannot observe a half-written pointer. This is necessary to prevent the garbage collector from observing invalid pointers, and differs from memmove in unmanaged languages. However, memmove is only required to do this if "from" and "to" may contain pointers, which can only be the case if "from", "to", and "n" are all word-aligned. Implementations are in memmove_*.s. Outside assembly calls memmove. memmove should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic - github.com/cloudwego/dynamicgo - github.com/ebitengine/purego - github.com/tetratelabs/wazero - github.com/ugorji/go/codec - gvisor.dev/gvisor - github.com/sagernet/gvisor Do not remove or change the type signature. See go.dev/issue/67401. go:linkname memmove go:noescape

func memmove(to unsafe.Pointer, from unsafe.Pointer, n uintptr)

memoryLimitHeapGoal method #

memoryLimitHeapGoal returns a heap goal derived from memoryLimit.

func (c *gcControllerState) memoryLimitHeapGoal() uint64

merge method #

merge adds in the deltas from b into a.

func (a *heapStatsDelta) merge(b *heapStatsDelta)

mergeSummaries function #

mergeSummaries merges consecutive summaries which may each represent at most 1 << logMaxPagesPerSum pages each together into one.

func mergeSummaries(sums []pallocSum, logMaxPagesPerSum uint) pallocSum

metricsLock function #

func metricsLock()

metricsUnlock function #

func metricsUnlock()

mexit function #

mexit tears down and exits the current thread. Don't call this directly to exit the thread, since it must run at the top of the thread stack. Instead, use gogo(&gp.m.g0.sched) to unwind the stack to the point that exits the thread. It is entered with m.p != nil, so write barriers are allowed. It will release the P before exiting. go:yeswritebarrierrec

func mexit(osStack bool)

mget function #

Try to get an m from midle list. sched.lock must be held. May run during STW, so write barriers are not allowed. go:nowritebarrierrec

func mget() *m

mincore function #

func mincore(addr unsafe.Pointer, n uintptr, dst *byte) int32

minit function #

Called to initialize a new m (including the bootstrap m). Called on the new thread, cannot allocate memory.

func minit()

minit function #

Called to initialize a new m (including the bootstrap m). Called on the new thread, cannot allocate memory.

func minit()

minit function #

Called to initialize a new m (including the bootstrap m). Called on the new thread, cannot allocate memory.

func minit()

minit function #

func minit()

minit function #

Called to initialize a new m (including the bootstrap m). Called on the new thread, cannot allocate memory.

func minit()

minit function #

Called to initialize a new m (including the bootstrap m). Called on the new thread, cannot allocate memory.

func minit()

minit function #

Called to initialize a new m (including the bootstrap m). Called on the new thread, cannot allocate memory.

func minit()

minit function #

Called to initialize a new m (including the bootstrap m). Called on the new thread, cannot allocate Go memory.

func minit()

minit function #

Called to initialize a new m (including the bootstrap m). Called on the new thread, cannot allocate memory.

func minit()

minit function #

Called to initialize a new m (including the bootstrap m). Called on the new thread, cannot allocate memory.

func minit()

minit function #

Called to initialize a new m (including the bootstrap m). Called on the new thread, cannot allocate memory.

func minit()

minitSignalMask function #

minitSignalMask is called when initializing a new m to set the thread's signal mask. When this is called all signals have been blocked for the thread. This starts with m.sigmask, which was set either from initSigmask for a newly created thread or by calling sigsave if this is a non-Go thread calling a Go function. It removes all essential signals from the mask, thus causing those signals to not be blocked. Then it sets the thread's signal mask. After this is called the thread can receive signals.

func minitSignalMask()

minitSignalStack function #

minitSignalStack is called when initializing a new m to set the alternate signal stack. If the alternate signal stack is not set for the thread (the normal case) then set the alternate signal stack to the gsignal stack. If the alternate signal stack is set for the thread (the case when a non-Go thread sets the alternate signal stack and then calls a Go function) then set the gsignal stack to the alternate signal stack. We also set the alternate signal stack to the gsignal stack if cgo is not used (regardless of whether it is already set). Record which choice was made in newSigstack, so that it can be undone in unminit.

func minitSignalStack()

minitSignals function #

minitSignals is called when initializing a new m to set the thread's alternate signal stack and signal mask.

func minitSignals()

miniterrno function #

errno address must be retrieved by calling the _Errno libc function. This will return a pointer to errno.

func miniterrno()

miniterrno function #

func miniterrno()

mix function #

func mix(a uintptr, b uintptr) uintptr

mix32 function #

func mix32(a uint32, b uint32) (uint32, uint32)

mlock function #

go:nosplit go:cgo_unsafe_args

func mlock(addr unsafe.Pointer, n uintptr)

mlock_trampoline function #

func mlock_trampoline()

mmap function #

mmap is used to do low-level memory allocation via mmap. Don't allow stack splits, since this function (used by sysAlloc) is called in a lot of low-level parts of the runtime and callers often assume it won't acquire any locks. go:nosplit

func mmap(addr unsafe.Pointer, n uintptr, prot int32, flags int32, fd int32, off uint32) (unsafe.Pointer, int)

mmap function #

go:nosplit

func mmap(addr unsafe.Pointer, n uintptr, prot int32, flags int32, fd int32, off uint32) (unsafe.Pointer, int)

mmap function #

mmap calls the mmap system call. It is implemented in assembly. We only pass the lower 32 bits of file offset to the assembly routine; the higher bits (if required), should be provided by the assembly routine as 0. The err result is an OS error code such as ENOMEM.

func mmap(addr unsafe.Pointer, n uintptr, prot int32, flags int32, fd int32, off uint32) (p unsafe.Pointer, err int)

mmap function #

mmap is used to do low-level memory allocation via mmap. Don't allow stack splits, since this function (used by sysAlloc) is called in a lot of low-level parts of the runtime and callers often assume it won't acquire any locks. go:nosplit

func mmap(addr unsafe.Pointer, n uintptr, prot int32, flags int32, fd int32, off uint32) (unsafe.Pointer, int)

mmap function #

mmap calls the mmap system call. We only pass the lower 32 bits of file offset to the assembly routine; the higher bits (if required), should be provided by the assembly routine as 0. The err result is an OS error code such as ENOMEM. go:nosplit

func mmap(addr unsafe.Pointer, n uintptr, prot int32, flags int32, fd int32, off uint32) (unsafe.Pointer, int)

mmap function #

mmap is used to route the mmap system call through C code when using cgo, to support sanitizer interceptors. Don't allow stack splits, since this function (used by sysAlloc) is called in a lot of low-level parts of the runtime and callers often assume it won't acquire any locks. go:nosplit

func mmap(addr unsafe.Pointer, n uintptr, prot int32, flags int32, fd int32, off uint32) (unsafe.Pointer, int)

mmap function #

mmap calls the mmap system call. It is implemented in assembly. We only pass the lower 32 bits of file offset to the assembly routine; the higher bits (if required), should be provided by the assembly routine as 0. The err result is an OS error code such as ENOMEM.

func mmap(addr unsafe.Pointer, n uintptr, prot int32, flags int32, fd int32, off uint32) (p unsafe.Pointer, err int)

mmap_trampoline function #

func mmap_trampoline()

mmap_trampoline function #

func mmap_trampoline()

modify method #

modify modifies an existing timer. This is called by the netpoll code or time.Ticker.Reset or time.Timer.Reset. Reports whether the timer was modified before it was run. If f == nil, then t.f, t.arg, and t.seq are not modified.

func (t *timer) modify(when int64, period int64, f func(arg any, seq uintptr, delay int64), arg any, seq uintptr) bool

moduledataverify function #

func moduledataverify()

moduledataverify1 function #

moduledataverify1 should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic Do not remove or change the type signature. See go.dev/issues/67401. See go.dev/issues/71672. go:linkname moduledataverify1

func moduledataverify1(datap *moduledata)

modulesinit function #

modulesinit creates the active modules slice out of all loaded modules. When a module is first loaded by the dynamic linker, an .init_array function (written by cmd/link) is invoked to call addmoduledata, appending the module to the linked list that starts with firstmoduledata. There are two times this can happen in the lifecycle of a Go program. First, if compiled with -linkshared, a number of modules built with -buildmode=shared can be loaded at program initialization. Second, a Go program can load a module while running that was built with -buildmode=plugin. After loading, this function is called which initializes the moduledata so it is usable by the GC and creates a new activeModules list. Only one goroutine may call modulesinit at a time.

func modulesinit()

monitorSuspendResume function #

func monitorSuspendResume()

morestack function #

func morestack()

morestack_noctxt function #

morestack_noctxt should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic Do not remove or change the type signature. See go.dev/issues/67401. See go.dev/issues/71672. go:linkname morestack_noctxt

func morestack_noctxt()

morestackc function #

This is exported as ABI0 via linkname so obj can call it. go:nosplit go:linkname morestackc

func morestackc()

moveToBmap function #

moveToBmap moves a bucket from src to dst. It returns the destination bucket (or a new destination bucket if it overflows) and the position at which the next key/value will be written; if pos == bucketCnt, the next key/value needs to be written in an overflow bucket.

func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int)

mp method #

mp returns the memRecord associated with the memProfile bucket b.

func (b *bucket) mp() *memRecord

mpreinit function #

Called to initialize a new m (including the bootstrap m). Called on the parent thread (main thread in case of bootstrap), can allocate memory.

func mpreinit(mp *m)

mpreinit function #

Called to initialize a new m (including the bootstrap m). Called on the parent thread (main thread in case of bootstrap), can allocate memory.

func mpreinit(mp *m)

mpreinit function #

Called to initialize a new m (including the bootstrap m). Called on the parent thread (main thread in case of bootstrap), can allocate memory.

func mpreinit(mp *m)

mpreinit function #

Called to initialize a new m (including the bootstrap m). Called on the parent thread (main thread in case of bootstrap), can allocate memory.

func mpreinit(mp *m)

mpreinit function #

Called to initialize a new m (including the bootstrap m). Called on the parent thread (main thread in case of bootstrap), can allocate memory.

func mpreinit(mp *m)

mpreinit function #

Called to initialize a new m (including the bootstrap m). Called on the parent thread (main thread in case of bootstrap), can allocate memory.

func mpreinit(mp *m)

mpreinit function #

Called to initialize a new m (including the bootstrap m). Called on the parent thread (main thread in case of bootstrap), can allocate memory.

func mpreinit(mp *m)

mpreinit function #

Called to initialize a new m (including the bootstrap m). Called on the parent thread (main thread in case of bootstrap), can allocate memory.

func mpreinit(mp *m)

mpreinit function #

Ms related functions

func mpreinit(mp *m)

mpreinit function #

Called to initialize a new m (including the bootstrap m). Called on the parent thread (main thread in case of bootstrap), can allocate memory.

func mpreinit(mp *m)

mpreinit function #

Called to initialize a new m (including the bootstrap m). Called on the parent thread (main thread in case of bootstrap), can allocate memory.

func mpreinit(mp *m)

mprotect function #

go:nosplit

func mprotect(addr unsafe.Pointer, n uintptr, prot int32) (ret int32, errno int32)

mprotect function #

go:nosplit

func mprotect(addr unsafe.Pointer, n uintptr, prot int32) (unsafe.Pointer, int)

mput function #

Put mp on midle list. sched.lock must be held. May run during STW, so write barriers are not allowed. go:nowritebarrierrec

func mput(mp *m)

mrandinit function #

mrandinit initializes the random state of an m.

func mrandinit(mp *m)

msanfree function #

func msanfree(addr unsafe.Pointer, sz uintptr)

msanfree function #

go:linkname msanfree go:noescape

func msanfree(addr unsafe.Pointer, sz uintptr)

msanmalloc function #

go:linkname msanmalloc go:noescape

func msanmalloc(addr unsafe.Pointer, sz uintptr)

msanmalloc function #

func msanmalloc(addr unsafe.Pointer, sz uintptr)

msanmove function #

go:linkname msanmove go:noescape

func msanmove(dst unsafe.Pointer, src unsafe.Pointer, sz uintptr)

msanmove function #

func msanmove(dst unsafe.Pointer, src unsafe.Pointer, sz uintptr)

msanread function #

If we are running on the system stack, the C program may have marked part of that stack as uninitialized. We don't instrument the runtime, but operations like a slice copy can call msanread anyhow for values on the stack. Just ignore msanread when running on the system stack. The other msan functions are fine. go:linkname msanread go:nosplit

func msanread(addr unsafe.Pointer, sz uintptr)

msanread function #

func msanread(addr unsafe.Pointer, sz uintptr)

msanwrite function #

func msanwrite(addr unsafe.Pointer, sz uintptr)

msanwrite function #

go:linkname msanwrite go:noescape

func msanwrite(addr unsafe.Pointer, sz uintptr)

msigrestore function #

msigrestore sets the current thread's signal mask to sigmask. This is used to restore the non-Go signal mask when a non-Go thread calls a Go function. This is nosplit and nowritebarrierrec because it is called by dropm after g has been cleared. go:nosplit go:nowritebarrierrec

func msigrestore(sigmask sigset)

msigrestore function #

go:nosplit

func msigrestore(sigmask sigset)

msigrestore function #

func msigrestore(sigmask sigset)

msigrestore function #

go:nosplit

func msigrestore(sigmask sigset)

mspinning function #

func mspinning()

mstart function #

mstart is the entry-point for new Ms. It is written in assembly, uses ABI0, is marked TOPFRAME, and calls mstart0.

func mstart()

mstart0 function #

mstart0 is the Go entry-point for new Ms. This must not split the stack because we may not even have stack bounds set up yet. May run during STW (because it doesn't have a P yet), so write barriers are not allowed. go:nosplit go:nowritebarrierrec

func mstart0()

mstart1 function #

The go:noinline is to guarantee the sys.GetCallerPC/sys.GetCallerSP below are safe, so that we can set up g0.sched to return to the call of mstart1 above. go:noinline

func mstart1()

mstart_stub function #

mstart_stub provides glue code to call mstart from pthread_create.

func mstart_stub()

mstart_stub function #

Glue code to call mstart from pthread_create.

func mstart_stub()

mstartm0 function #

mstartm0 implements part of mstart1 that only runs on the m0. Write barriers are allowed here because we know the GC can't be running yet, so they'll be no-ops. go:yeswritebarrierrec

func mstartm0()

mullu function #

64x64 -> 128 bit multiply. Adapted from Hacker's Delight.

func mullu(u uint64, v uint64) (lo uint64, hi uint64)

munmap function #

munmap calls the munmap system call. It is implemented in assembly.

func munmap(addr unsafe.Pointer, n uintptr)

munmap function #

go:nosplit go:cgo_unsafe_args

func munmap(addr unsafe.Pointer, n uintptr)

munmap function #

func munmap(addr unsafe.Pointer, n uintptr)

munmap function #

go:nosplit go:cgo_unsafe_args

func munmap(addr unsafe.Pointer, n uintptr)

munmap function #

go:nosplit

func munmap(addr unsafe.Pointer, n uintptr)

munmap function #

go:nosplit

func munmap(addr unsafe.Pointer, n uintptr)

munmap function #

munmap calls the munmap system call. It is implemented in assembly.

func munmap(addr unsafe.Pointer, n uintptr)

munmap_trampoline function #

func munmap_trampoline()

munmap_trampoline function #

func munmap_trampoline()

mutexContended function #

func mutexContended(l *mutex) bool

mutexContended function #

func mutexContended(l *mutex) bool

mutexContended function #

func mutexContended(l *mutex) bool

mutexContended function #

func mutexContended(l *mutex) bool

mutexContended function #

func mutexContended(l *mutex) bool

mutexPreferLowLatency function #

mutexPreferLowLatency reports if this mutex prefers low latency at the risk of performance collapse. If so, we can allow all waiting threads to spin on the state word rather than go to sleep. TODO: We could have the waiting Ms each spin on their own private cache line, especially if we can put a bound on the on-CPU time that would consume. TODO: If there's a small set of mutex values with special requirements, they could make use of a more specialized lock2/unlock2 implementation. Otherwise, we're constrained to what we can fit within a single uintptr with no additional storage on the M for each lock held. go:nosplit

func mutexPreferLowLatency(l *mutex) bool

mutexProfileInternal function #

mutexProfileInternal returns the number of records n in the profile. If there are less than size records, copyFn is invoked for each record, and ok returns true.

func mutexProfileInternal(size int, copyFn func(profilerecord.BlockProfileRecord)) (n int, ok bool)

mutexWaitListHead function #

mutexWaitListHead recovers a full muintptr that was missing its low bits. With the exception of the static m0 value, it requires allocating runtime.m values in a size class with a particular minimum alignment. The 2048-byte size class allows recovering the full muintptr value even after overwriting the low 11 bits with flags. We can use those 11 bits as 3 flags and an atomically-swapped byte. go:nosplit

func mutexWaitListHead(v uintptr) muintptr

mutexevent function #

go:linkname mutexevent sync.event

func mutexevent(cycles int64, skip int)

name method #

name should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/phuslu/log Do not remove or change the type signature. See go.dev/issue/67401.

func (s srcFunc) name() string

name method #

func (t rtype) name() string

nameOff method #

func (t rtype) nameOff(off nameOff) name

nanotime function #

go:linkname nanotime go:nosplit

func nanotime() int64

nanotime function #

Exported via linkname for use by time and internal/poll. Many external packages also linkname nanotime for a fast monotonic time. Such code should be updated to use: var start = time.Now() // at init time and then replace nanotime() with time.Since(start), which is equally fast. However, all the code linknaming nanotime is never going to go away. Do not remove or change the type signature. See go.dev/issue/67401. go:linkname nanotime go:nosplit

func nanotime() int64

nanotime1 function #

go:wasmimport gojs runtime.nanotime1

func nanotime1() int64

nanotime1 function #

go:nosplit

func nanotime1() int64

nanotime1 function #

go:nosplit go:cgo_unsafe_args

func nanotime1() int64

nanotime1 function #

func nanotime1() int64

nanotime1 function #

go:nosplit

func nanotime1() int64

nanotime1 function #

func nanotime1() int64

nanotime1 function #

go:nosplit

func nanotime1() int64

nanotime1 function #

go:nosplit

func nanotime1() int64

nanotime1 function #

go:nosplit

func nanotime1() int64

nanotime_trampoline function #

func nanotime_trampoline()

needAndBindM function #

Acquire an extra m and bind it to the C thread when a pthread key has been created. go:nosplit

func needAndBindM()

needIdleMarkWorker method #

needIdleMarkWorker is a hint as to whether another idle mark worker is needed. The caller must still call addIdleMarkWorker to become one. This is mainly useful for a quick check before an expensive operation. nosplit because it may be called without a P. go:nosplit

func (c *gcControllerState) needIdleMarkWorker() bool

needUpdate method #

needUpdate returns true if the limiter's maximum update period has been exceeded, and so would benefit from an update.

func (l *gcCPULimiterState) needUpdate(now int64) bool

needm function #

needm is called when a cgo callback happens on a thread without an m (a thread not created by Go). In this case, needm is expected to find an m to use and return with m, g initialized correctly. Since m and g are not set now (likely nil, but see below) needm is limited in what routines it can call. In particular it can only call nosplit functions (textflag 7) and cannot do any scheduling that requires an m. In order to avoid needing heavy lifting here, we adopt the following strategy: there is a stack of available m's that can be stolen. Using compare-and-swap to pop from the stack has ABA races, so we simulate a lock by doing an exchange (via Casuintptr) to steal the stack head and replace the top pointer with MLOCKED (1). This serves as a simple spin lock that we can use even without an m. The thread that locks the stack in this way unlocks the stack by storing a valid stack head pointer. In order to make sure that there is always an m structure available to be stolen, we maintain the invariant that there is always one more than needed. At the beginning of the program (if cgo is in use) the list is seeded with a single m. If needm finds that it has taken the last m off the list, its job is - once it has installed its own m so that it can do things like allocate memory - to create a spare m and put it on the list. Each of these extra m's also has a g0 and a curg that are pressed into service as the scheduling stack and current goroutine for the duration of the cgo callback. It calls dropm to put the m back on the list, 1. when the callback is done with the m in non-pthread platforms, 2. or when the C thread exiting on pthread platforms. The signal argument indicates whether we're called from a signal handler. go:nosplit

func needm(signal bool)

needsAdd method #

needsAdd reports whether t needs to be added to a timers heap. t must be locked.

func (t *timer) needsAdd() bool

netbsdMstart function #

mstart is the entry-point for new Ms. It is written in assembly, uses ABI0, is marked TOPFRAME, and calls netbsdMstart0.

func netbsdMstart()

netbsdMstart0 function #

netbsdMstart0 is the function call that starts executing a newly created thread. On NetBSD, a new thread inherits the signal stack of the creating thread. That confuses minit, so we remove that signal stack here before calling the regular mstart. It's a bit baroque to remove a signal stack here only to add one in minit, but it's a simple change that keeps NetBSD working like other OS's. At this point all signals are blocked, so there is no race. go:nosplit

func netbsdMstart0()

netpoll function #

netpoll checks for ready network connections. Returns a list of goroutines that become runnable, and a delta to add to netpollWaiters. This must never return an empty list with a non-zero delta. delay < 0: blocks indefinitely delay == 0: does not block, just polls delay > 0: block for up to that many nanoseconds

func netpoll(delay int64) (gList, int32)

netpoll function #

Polls for ready network connections. Returns a list of goroutines that become runnable, and a delta to add to netpollWaiters. This must never return an empty list with a non-zero delta.

func netpoll(delay int64) (gList, int32)

netpoll function #

netpoll checks for ready network connections. Returns a list of goroutines that become runnable, and a delta to add to netpollWaiters. This must never return an empty list with a non-zero delta. delay < 0: blocks indefinitely delay == 0: does not block, just polls delay > 0: block for up to that many nanoseconds

func netpoll(delay int64) (gList, int32)

netpoll function #

func netpoll(delay int64) (gList, int32)

netpoll function #

netpoll checks for ready network connections. Returns a list of goroutines that become runnable, and a delta to add to netpollWaiters. This must never return an empty list with a non-zero delta. delay < 0: blocks indefinitely delay == 0: does not block, just polls delay > 0: block for up to that many nanoseconds

func netpoll(delay int64) (gList, int32)

netpoll function #

func netpoll(delay int64) (gList, int32)

netpoll function #

netpoll checks for ready network connections. Returns a list of goroutines that become runnable, and a delta to add to netpollWaiters. This must never return an empty list with a non-zero delta. delay < 0: blocks indefinitely delay == 0: does not block, just polls delay > 0: block for up to that many nanoseconds go:nowritebarrierrec

func netpoll(delay int64) (gList, int32)

netpoll function #

netpoll checks for ready network connections. Returns a list of goroutines that become runnable, and a delta to add to netpollWaiters. This must never return an empty list with a non-zero delta. delay < 0: blocks indefinitely delay == 0: does not block, just polls delay > 0: block for up to that many nanoseconds

func netpoll(delay int64) (gList, int32)

netpollAdjustWaiters function #

func netpollAdjustWaiters(delta int32)

netpollAdjustWaiters function #

netpollAdjustWaiters adds delta to netpollWaiters.

func netpollAdjustWaiters(delta int32)

netpollAnyWaiters function #

func netpollAnyWaiters() bool

netpollAnyWaiters function #

netpollAnyWaiters reports whether any goroutines are waiting for I/O.

func netpollAnyWaiters() bool

netpollBreak function #

func netpollBreak()

netpollBreak function #

func netpollBreak()

netpollBreak function #

netpollBreak interrupts an epollwait.

func netpollBreak()

netpollBreak function #

netpollBreak interrupts a kevent.

func netpollBreak()

netpollBreak function #

func netpollBreak()

netpollBreak function #

func netpollBreak()

netpollBreak function #

netpollBreak interrupts a port_getn wait.

func netpollBreak()

netpollBreak function #

netpollBreak interrupts a poll.

func netpollBreak()

netpollDeadline function #

func netpollDeadline(arg any, seq uintptr, delta int64)

netpollGenericInit function #

func netpollGenericInit()

netpollGenericInit function #

func netpollGenericInit()

netpollIsPollDescriptor function #

func netpollIsPollDescriptor(fd uintptr) bool

netpollIsPollDescriptor function #

func netpollIsPollDescriptor(fd uintptr) bool

netpollIsPollDescriptor function #

func netpollIsPollDescriptor(fd uintptr) bool

netpollIsPollDescriptor function #

func netpollIsPollDescriptor(fd uintptr) bool

netpollIsPollDescriptor function #

func netpollIsPollDescriptor(fd uintptr) bool

netpollIsPollDescriptor function #

func netpollIsPollDescriptor(fd uintptr) bool

netpollIsPollDescriptor function #

func netpollIsPollDescriptor(fd uintptr) bool

netpollIsPollDescriptor function #

func netpollIsPollDescriptor(fd uintptr) bool

netpollQueueTimer function #

netpollQueueTimer queues a timer to wake up the poller after the given delay. It returns true if the timer expired during this call.

func netpollQueueTimer(delay int64) (signaled bool)

netpollReadDeadline function #

func netpollReadDeadline(arg any, seq uintptr, delta int64)

netpollWriteDeadline function #

func netpollWriteDeadline(arg any, seq uintptr, delta int64)

netpollarm function #

func netpollarm(pd *pollDesc, mode int)

netpollarm function #

subscribe the fd to the port such that port_getn will return one event.

func netpollarm(pd *pollDesc, mode int)

netpollarm function #

func netpollarm(pd *pollDesc, mode int)

netpollarm function #

func netpollarm(pd *pollDesc, mode int)

netpollarm function #

func netpollarm(pd *pollDesc, mode int)

netpollarm function #

func netpollarm(pd *pollDesc, mode int)

netpollarm function #

func netpollarm(pd *pollDesc, mode int)

netpollblock function #

returns true if IO is ready, or false if timed out or closed waitio - wait only for completed IO, ignore errors Concurrent calls to netpollblock in the same mode are forbidden, as pollDesc can hold only a single waiting goroutine for each mode.

func netpollblock(pd *pollDesc, mode int32, waitio bool) bool

netpollblockcommit function #

func netpollblockcommit(gp *g, gpp unsafe.Pointer) bool

netpollcheckerr function #

func netpollcheckerr(pd *pollDesc, mode int32) int

netpollclose function #

func netpollclose(fd uintptr) int32

netpollclose function #

func netpollclose(fd uintptr) int32

netpollclose function #

func netpollclose(fd uintptr) int32

netpollclose function #

func netpollclose(fd uintptr) int32

netpollclose function #

func netpollclose(fd uintptr) int32

netpollclose function #

func netpollclose(fd uintptr) int32

netpollclose function #

func netpollclose(fd uintptr) uintptr

netpolldeadlineimpl function #

func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read bool, write bool)

netpolldisarm function #

func netpolldisarm(pd *pollDesc, mode int32)

netpollgoready function #

func netpollgoready(gp *g, traceskip int)

netpollinit function #

func netpollinit()

netpollinit function #

func netpollinit()

netpollinit function #

func netpollinit()

netpollinit function #

func netpollinit()

netpollinit function #

func netpollinit()

netpollinit function #

func netpollinit()

netpollinit function #

func netpollinit()

netpollinited function #

func netpollinited() bool

netpollinited function #

func netpollinited() bool

netpollopen function #

func netpollopen(fd uintptr, pd *pollDesc) int32

netpollopen function #

func netpollopen(fd uintptr, pd *pollDesc) int32

netpollopen function #

func netpollopen(fd uintptr, pd *pollDesc) uintptr

netpollopen function #

func netpollopen(fd uintptr, pd *pollDesc) int32

netpollopen function #

func netpollopen(fd uintptr, pd *pollDesc) int32

netpollopen function #

func netpollopen(fd uintptr, pd *pollDesc) int32

netpollopen function #

func netpollopen(fd uintptr, pd *pollDesc) int32

netpollready function #

netpollready is called by the platform-specific netpoll function. It declares that the fd associated with pd is ready for I/O. The toRun argument is used to build a list of goroutines to return from netpoll. The mode argument is 'r', 'w', or 'r'+'w' to indicate whether the fd is ready for reading or writing or both. This returns a delta to apply to netpollWaiters. This may run while the world is stopped, so write barriers are not allowed. go:nowritebarrier

func netpollready(toRun *gList, pd *pollDesc, mode int32) int32

netpollunblock function #

netpollunblock moves either pd.rg (if mode == 'r') or pd.wg (if mode == 'w') into the pdReady state. This returns any goroutine blocked on pd.{rg,wg}. It adds any adjustment to netpollWaiters to *delta; this adjustment should be applied after the goroutine has been marked ready.

func netpollunblock(pd *pollDesc, mode int32, ioready bool, delta *int32) *g

netpollupdate function #

Updates the association with a new set of interested events. After this call, port_getn will return one and only one event for that particular descriptor, so this function needs to be called again.

func netpollupdate(pd *pollDesc, set uint32, clear uint32)

netpollwakeup function #

netpollwakeup writes on wrwake to wakeup poll before any changes.

func netpollwakeup()

new method #

new allocates a new object of the provided type into the arena, and returns its pointer. This operation is not safe to call concurrently with other operations on the same arena.

func (a *userArena) new(typ *_type) unsafe.Pointer

newAllocBits function #

newAllocBits returns a pointer to 8 byte aligned bytes to be used for this span's alloc bits. newAllocBits is used to provide newly initialized spans allocation bits. For spans not being initialized the mark bits are repurposed as allocation bits when the span is swept.

func newAllocBits(nelems uintptr) *gcBits

newArenaMayUnlock function #

newArenaMayUnlock allocates and zeroes a gcBits arena. The caller must hold gcBitsArena.lock. This may temporarily release it.

func newArenaMayUnlock() *gcBitsArena

newBucket function #

newBucket allocates a bucket with the given type and number of stack entries.

func newBucket(typ bucketType, nstk int) *bucket

newInlineUnwinder function #

newInlineUnwinder creates an inlineUnwinder initially set to the inner-most inlined frame at PC. PC should be a "call PC" (not a "return PC"). This unwinder uses non-strict handling of PC because it's assumed this is only ever used for symbolic debugging. If things go really wrong, it'll just fall back to the outermost frame. newInlineUnwinder should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/phuslu/log Do not remove or change the type signature. See go.dev/issue/67401. go:linkname newInlineUnwinder

func newInlineUnwinder(f funcInfo, pc uintptr) (inlineUnwinder, inlineFrame)

newMarkBits function #

newMarkBits returns a pointer to 8 byte aligned bytes to be used for a span's mark bits.

func newMarkBits(nelems uintptr) *gcBits

newPinnerBits method #

newPinnerBits returns a pointer to 8 byte aligned bytes to be used for this span's pinner bits. newPinnerBits is used to mark objects that are pinned. They are copied when the span is swept.

func (s *mspan) newPinnerBits() *pinnerBits

newProfBuf function #

newProfBuf returns a new profiling buffer with room for a header of hdrsize words and a buffer of at least bufwords words.

func newProfBuf(hdrsize int, bufwords int, tags int) *profBuf

newSpecialsIter function #

func newSpecialsIter(span *mspan) specialsIter

newTimer function #

newTimer allocates and returns a new time.Timer or time.Ticker (same layout) with the given parameters. go:linkname newTimer time.newTimer

func newTimer(when int64, period int64, f func(arg any, seq uintptr, delay int64), arg any, c *hchan) *timeTimer

newTraceMapNode method #

func (tab *traceMap) newTraceMapNode(data unsafe.Pointer, size uintptr, hash uintptr, id uint64) *traceMapNode

newUserArena function #

newUserArena creates a new userArena ready to be used.

func newUserArena() *userArena

newUserArenaChunk function #

newUserArenaChunk allocates a user arena chunk, which maps to a single heap arena and single span. Returns a pointer to the base of the chunk (this is really important: we need to keep the chunk alive) and the span.

func newUserArenaChunk() (unsafe.Pointer, *mspan)

newWakeableSleep function #

newWakeableSleep initializes a new wakeableSleep and returns it.

func newWakeableSleep() *wakeableSleep

newarray function #

newarray allocates an array of n elements of type typ. newarray should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/RomiChan/protobuf - github.com/segmentio/encoding - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname newarray

func newarray(typ *_type, n int) unsafe.Pointer

newcoro function #

newcoro creates a new coro containing a goroutine blocked waiting to run f and returns that coro.

func newcoro(f func(*coro)) *coro

newdefer function #

Allocate a Defer, usually using per-P pool. Each defer must be released with freedefer. The defer is not added to any defer chain yet.

func newdefer() *_defer

newextram function #

newextram allocates m's and puts them on the extra list. It is called with a working local m, so that it can do things like call schedlock and allocate.

func newextram()

newm function #

Create a new m. It will start off with a call to fn, or else the scheduler. fn needs to be static and not a heap allocated closure. May run with m.p==nil, so write barriers are not allowed. id is optional pre-allocated m ID. Omit by passing -1. go:nowritebarrierrec

func newm(fn func(), pp *p, id int64)

newm1 function #

func newm1(mp *m)

newobject function #

implementation of new builtin compiler (both frontend and SSA backend) knows the signature of this function.

func newobject(typ *_type) unsafe.Pointer

newosproc function #

May run with m.p==nil, so write barriers are not allowed. go:nowritebarrier

func newosproc(mp *m)

newosproc function #

May run with m.p==nil, so write barriers are not allowed. go:nowritebarrier

func newosproc(mp *m)

newosproc function #

May run with m.p==nil, so write barriers are not allowed. This function is called by newosproc0, so it is also required to operate without stack guards. go:nowritebarrierrec go:nosplit

func newosproc(mp *m)

newosproc function #

May run with m.p==nil, so write barriers are not allowed. go:nowritebarrierrec

func newosproc(mp *m)

newosproc function #

May run with m.p==nil, so write barriers are not allowed. go:nowritebarrier

func newosproc(mp *m)

newosproc function #

May run with m.p==nil, so write barriers are not allowed. go:nowritebarrier

func newosproc(mp *m)

newosproc function #

May run with m.p==nil, so write barriers are not allowed. go:nowritebarrierrec

func newosproc(mp *m)

newosproc function #

May run with m.p==nil, so write barriers are not allowed. go:nowritebarrier

func newosproc(mp *m)

newosproc function #

func newosproc(mp *m)

newosproc function #

May run with m.p==nil, so write barriers are not allowed. go:nowritebarrier

func newosproc(mp *m)

newosproc function #

May run with m.p==nil, so write barriers are not allowed. go:nowritebarrier

func newosproc(mp *m)

newosproc function #

May run with m.p==nil, so write barriers are not allowed. go:nowritebarrier

func newosproc(mp *m)

newosproc0 function #

newosproc0 is a version of newosproc that can be called before the runtime is initialized. This function is not safe to use after initialization as it does not pass an M as fnarg. go:nosplit

func newosproc0(stacksize uintptr, fn uintptr)

newosproc0 function #

Used by the C library build mode. On Linux this function would allocate a stack, but that's not necessary for Windows. No stack guards are present and the GC has not been initialized, so write barriers will fail. go:nowritebarrierrec go:nosplit

func newosproc0(mp *m, stk unsafe.Pointer)

newosproc0 function #

Version of newosproc that doesn't require a valid G. go:nosplit

func newosproc0(stacksize uintptr, fn unsafe.Pointer)

newosproc0 function #

Version of newosproc that doesn't require a valid G. go:nosplit

func newosproc0(stacksize uintptr, fn unsafe.Pointer)

newosproc0 function #

newosproc0 is a version of newosproc that can be called before the runtime is initialized. This function is not safe to use after initialization as it does not pass an M as fnarg. go:nosplit

func newosproc0(stacksize uintptr, fn *funcDescriptor)

newoverflow method #

func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap

newproc function #

Create a new g running fn. Put it on the queue of g's waiting to run. The compiler turns a go statement into a call to this.

func newproc(fn *funcval)

newproc1 function #

Create a new g in state _Grunnable (or _Gwaiting if parked is true), starting at fn. callerpc is the address of the go statement that created this. The caller is responsible for adding the new g to the scheduler. If parked is true, waitreason must be non-zero.

func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g

newstack function #

Called from runtime·morestack when more stack is needed. Allocate larger stack and relocate to new stack. Stack growth is multiplicative, for constant amortized cost. g->atomicstatus will be Grunning or Gscanrunning upon entry. If the scheduler is trying to stop this g, then it will set preemptStop. This must be nowritebarrierrec because it can be called as part of stack growth from other nowritebarrierrec functions, but the compiler doesn't check this. go:nowritebarrierrec

func newstack()

next method #

func (enum *randomEnum) next()

next method #

next provides a new sample to the controller. input is the sample, setpoint is the desired point, and period is how much time (in whatever unit makes the most sense) has passed since the last sample. Returns a new value for the variable it's controlling, and whether the operation completed successfully. One reason this might fail is if error has been growing in an unbounded manner, to the point of overflow. In the specific case of an error overflow occurs, the errOverflow field will be set and the rest of the controller's internal state will be fully reset.

func (c *piController) next(input float64, setpoint float64, period float64) (float64, bool)

next method #

func (i *specialsIter) next()

next method #

func (u *unwinder) next()

next method #

next returns the frame representing uf's logical caller.

func (u *inlineUnwinder) next(uf inlineFrame) inlineFrame

next method #

next advances the pointers iterator, returning the updated iterator and the address of the next pointer. limit must be the same each time it is passed to next. nosplit because it is used during write barriers and must not be preempted. go:nosplit

func (tp typePointers) next(limit uintptr) (typePointers, uintptr)

nextDefer method #

nextDefer returns the next deferred function to invoke, if any. Note: The "ok bool" result is necessary to correctly handle when the deferred function itself was nil (e.g., "defer (func())(nil)").

func (p *_panic) nextDefer() (func(), bool)

nextFast method #

nextFast is the fast path of next. nextFast is written to be inlineable and, as the name implies, fast. Callers that are performance-critical should iterate using the following pattern: for { var addr uintptr if tp, addr = tp.nextFast(); addr == 0 { if tp, addr = tp.next(limit); addr == 0 { break } } // Use addr. ... } nosplit because it is used during write barriers and must not be preempted. go:nosplit

func (tp typePointers) nextFast() (typePointers, uintptr)

nextFrame method #

nextFrame finds the next frame that contains deferred calls, if any.

func (p *_panic) nextFrame() (ok bool)

nextFree method #

nextFree returns the next free object from the cached span if one is available. Otherwise it refills the cache with a span with an available object and returns that object along with a flag indicating that this was a heavy weight allocation. If it is a heavy weight allocation the caller must determine whether a new GC cycle needs to be started or if the GC is active whether this goroutine needs to assist the GC. Must run in a non-preemptible context since otherwise the owner of c could change.

func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, checkGCTrigger bool)

nextFreeFast function #

nextFreeFast returns the next free object if one is quickly available. Otherwise it returns 0.

func nextFreeFast(s *mspan) gclinkptr

nextFreeIndex method #

nextFreeIndex returns the index of the next free object in s at or after s.freeindex. There are hardware instructions that can be used to make this faster if profiling warrants it.

func (s *mspan) nextFreeIndex() uint16

nextGen method #

nextGen moves the scavenger forward one generation. Must be called once per GC cycle, but may be called more often to force more memory to be released. nextGen may only run concurrently with find.

func (s *scavengeIndex) nextGen()

nextMarkBitArenaEpoch function #

nextMarkBitArenaEpoch establishes a new epoch for the arenas holding the mark bits. The arenas are named relative to the current GC cycle which is demarcated by the call to finishweep_m. All current spans have been swept. During that sweep each span allocated room for its gcmarkBits in gcBitsArenas.next block. gcBitsArenas.next becomes the gcBitsArenas.current where the GC will mark objects and after each span is swept these bits will be used to allocate objects. gcBitsArenas.current becomes gcBitsArenas.previous where the span's gcAllocBits live until all the spans have been swept during this GC cycle. The span's sweep extinguishes all the references to gcBitsArenas.previous by pointing gcAllocBits into the gcBitsArenas.current. The gcBitsArenas.previous is released to the gcBitsArenas.free list.

func nextMarkBitArenaEpoch()

nextSample function #

nextSample returns the next sampling point for heap profiling. The goal is to sample allocations on average every MemProfileRate bytes, but with a completely random distribution over the allocation timeline; this corresponds to a Poisson process with parameter MemProfileRate. In Poisson processes, the distance between two samples follows the exponential distribution (exp(MemProfileRate)), so the best return value is a random number taken from an exponential distribution whose mean is MemProfileRate.

func nextSample() int64

nextSampleNoFP function #

nextSampleNoFP is similar to nextSample, but uses older, simpler code to avoid floating point.

func nextSampleNoFP() int64

nextSeq method #

nextSeq returns the next sequence number for the resource.

func (r *traceSchedResourceState) nextSeq(gen uintptr) traceArg

nextSpanForSweep method #

nextSpanForSweep finds and pops the next span for sweeping from the central sweep buffers. It returns ownership of the span to the caller. Returns nil if no such span exists.

func (h *mheap) nextSpanForSweep() *mspan

nextslicecap function #

nextslicecap computes the next appropriate slice length.

func nextslicecap(newLen int, oldCap int) int

nilfunc function #

go:nosplit

func nilfunc()

nilinterequal function #

func nilinterequal(p unsafe.Pointer, q unsafe.Pointer) bool

nilinterhash function #

nilinterhash should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/anacrolix/stm - github.com/aristanetworks/goarista Do not remove or change the type signature. See go.dev/issue/67401. go:linkname nilinterhash

func nilinterhash(p unsafe.Pointer, h uintptr) uintptr

noEscapePtr function #

noEscapePtr hides a pointer from escape analysis. See noescape. USE CAREFULLY! go:nosplit

func noEscapePtr(p *T) *T

noSignalStack function #

This is called when we receive a signal when there is no signal stack. This can only happen if non-Go code calls sigaltstack to disable the signal stack.

func noSignalStack(sig uint32)

noescape function #

noescape hides a pointer from escape analysis. noescape is the identity function but escape analysis doesn't think the output depends on the input. noescape is inlined and currently compiles down to zero instructions. USE CAREFULLY! noescape should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/gopkg - github.com/ebitengine/purego - github.com/hamba/avro/v2 - github.com/puzpuzpuz/xsync/v3 - github.com/songzhibin97/gkit Do not remove or change the type signature. See go.dev/issue/67401. go:linkname noescape go:nosplit

func noescape(p unsafe.Pointer) unsafe.Pointer

noldbuckets method #

noldbuckets calculates the number of buckets prior to the current map growth.

func (h *hmap) noldbuckets() uintptr

nonblockingPipe function #

func nonblockingPipe() (r int32, w int32, errno int32)

nonblockingPipe function #

func nonblockingPipe() (r int32, w int32, errno int32)

noscan method #

go:nosplit

func (sc spanClass) noscan() bool

notInitialized function #

func notInitialized()

notInitialized1 function #

Called if a wasmexport function is called before runtime initialization go:nosplit

func notInitialized1()

noteclear function #

One-time notifications.

func noteclear(n *note)

noteclear function #

One-time notifications.

func noteclear(n *note)

noteclear function #

func noteclear(n *note)

noteclear function #

One-time notifications.

func noteclear(n *note)

noted function #

func noted(mode int32) int32

notesleep function #

func notesleep(n *note)

notesleep function #

func notesleep(n *note)

notesleep function #

func notesleep(n *note)

notesleep function #

func notesleep(n *note)

notetsleep function #

func notetsleep(n *note, ns int64) bool

notetsleep function #

func notetsleep(n *note, ns int64) bool

notetsleep function #

func notetsleep(n *note, ns int64) bool

notetsleep function #

func notetsleep(n *note, ns int64) bool

notetsleep_internal function #

go:nosplit

func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool

notetsleep_internal function #

May run with m.p==nil if called from notetsleep, so write barriers are not allowed. go:nosplit go:nowritebarrier

func notetsleep_internal(n *note, ns int64) bool

notetsleepg function #

same as runtime·notetsleep, but called on user g (not g0)

func notetsleepg(n *note, ns int64) bool

notetsleepg function #

same as runtime·notetsleep, but called on user g (not g0) calls only nosplit functions between entersyscallblock/exitsyscall.

func notetsleepg(n *note, ns int64) bool

notetsleepg function #

same as runtime·notetsleep, but called on user g (not g0)

func notetsleepg(n *note, ns int64) bool

notetsleepg function #

same as runtime·notetsleep, but called on user g (not g0) calls only nosplit functions between entersyscallblock/exitsyscall.

func notetsleepg(n *note, ns int64) bool

notewakeup function #

func notewakeup(n *note)

notewakeup function #

func notewakeup(n *note)

notewakeup function #

func notewakeup(n *note)

notewakeup function #

func notewakeup(n *note)

notify function #

go:noescape

func notify(fn unsafe.Pointer) int32

notifyListAdd function #

notifyListAdd adds the caller to a notify list such that it can receive notifications. The caller must eventually call notifyListWait to wait for such a notification, passing the returned ticket number. go:linkname notifyListAdd sync.runtime_notifyListAdd

func notifyListAdd(l *notifyList) uint32

notifyListCheck function #

go:linkname notifyListCheck sync.runtime_notifyListCheck

func notifyListCheck(sz uintptr)

notifyListNotifyAll function #

notifyListNotifyAll notifies all entries in the list. go:linkname notifyListNotifyAll sync.runtime_notifyListNotifyAll

func notifyListNotifyAll(l *notifyList)

notifyListNotifyOne function #

notifyListNotifyOne notifies one entry in the list. go:linkname notifyListNotifyOne sync.runtime_notifyListNotifyOne

func notifyListNotifyOne(l *notifyList)

notifyListWait function #

notifyListWait waits for a notification. If one has been sent since notifyListAdd was called, it returns immediately. Otherwise, it blocks. go:linkname notifyListWait sync.runtime_notifyListWait

func notifyListWait(l *notifyList, t uint32)

nsToSec function #

nsToSec takes a duration in nanoseconds and converts it to seconds as a float64.

func nsToSec(ns int64) float64

nsec function #

go:noescape

func nsec(*int64) int64

objBase method #

objBase returns the base pointer for the object containing addr in span. Assumes that addr points into a valid part of span (span.base() <= addr < span.limit). go:nosplit

func (span *mspan) objBase(addr uintptr) uintptr

objIndex method #

nosplit, because it is called by other nosplit code like findObject go:nosplit

func (s *mspan) objIndex(p uintptr) uintptr

obsdsigprocmask function #

go:noescape

func obsdsigprocmask(how int32, new sigset) sigset

ofObject method #

ofObject returns the pinState of the n'th object. nosplit, because it's called by isPinned, which is nosplit go:nosplit

func (p *pinnerBits) ofObject(n uintptr) pinState

offAddrToLevelIndex function #

offAddrToLevelIndex converts an address in the offset address space to the index into summary[level] containing addr.

func offAddrToLevelIndex(level int, addr offAddr) int

offset method #

func (b bitCursor) offset(cnt uintptr) bitCursor

ok method #

ok returns true if the traceLocker is valid (i.e. tracing is enabled). nosplit because it's called on the syscall path when stack movement is forbidden. go:nosplit

func (tl traceLocker) ok() bool

oldbucketmask method #

oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().

func (h *hmap) oldbucketmask() uintptr

oldmask method #

func (c *sigctxt) oldmask() uint32

oldmask method #

func (c *sigctxt) oldmask() uint32

oldmask method #

func (c *sigctxt) oldmask() uint32

oldmask method #

func (c *sigctxt) oldmask() uint32

oldmask method #

func (c *sigctxt) oldmask() uint64

oneNewExtraM function #

oneNewExtraM allocates an m and puts it on the extra list.

func oneNewExtraM()

open function #

go:nosplit

func open(path *byte, mode int32, perm int32) int32

open function #

go:noescape

func open(name *byte, mode int32, perm int32) int32

open function #

go:noescape

func open(name *byte, mode int32, perm int32) int32

open function #

Stubs so tests can link correctly. These should never be called.

func open(name *byte, mode int32, perm int32) int32

open function #

go:nosplit go:cgo_unsafe_args

func open(name *byte, mode int32, perm int32) (ret int32)

open function #

go:nosplit go:cgo_unsafe_args

func open(name *byte, mode int32, perm int32) (ret int32)

open function #

go:noescape

func open(name *byte, mode int32, perm int32) int32

open function #

Stubs so tests can link correctly. These should never be called.

func open(name *byte, mode int32, perm int32) int32

open function #

go:nosplit

func open(name *byte, mode int32, perm int32) int32

open_trampoline function #

func open_trampoline()

open_trampoline function #

func open_trampoline()

osArchInit function #

func osArchInit()

osArchInit function #

func osArchInit()

osArchInit function #

func osArchInit()

osArchInit function #

func osArchInit()

osArchInit function #

func osArchInit()

osArchInit function #

func osArchInit()

osArchInit function #

func osArchInit()

osArchInit function #

func osArchInit()

osArchInit function #

func osArchInit()

osArchInit function #

func osArchInit()

osPreemptExtEnter function #

go:nosplit

func osPreemptExtEnter(mp *m)

osPreemptExtEnter function #

osPreemptExtEnter is called before entering external code that may call ExitProcess. This must be nosplit because it may be called from a syscall with untyped stack slots, so the stack must not be grown or scanned. go:nosplit

func osPreemptExtEnter(mp *m)

osPreemptExtExit function #

osPreemptExtExit is called after returning from external code that may call ExitProcess. See osPreemptExtEnter for why this is nosplit. go:nosplit

func osPreemptExtExit(mp *m)

osPreemptExtExit function #

go:nosplit

func osPreemptExtExit(mp *m)

osRelax function #

osRelax is called by the scheduler when transitioning to and from all Ps being idle. Some versions of Windows have high resolution timer. For those versions osRelax is noop. For Windows versions without high resolution timer, osRelax adjusts the system-wide timer resolution. Go needs a high resolution timer while running and there's little extra cost if we're already using the CPU, but if all Ps are idle there's no need to consume extra power to drive the high-res timer.

func osRelax(relax bool) uint32

osRelax function #

osRelax is called by the scheduler when transitioning to and from all Ps being idle.

func osRelax(relax bool)

osSetupTLS function #

osSetupTLS is called by needm to set up TLS for non-Go threads. Defined in assembly.

func osSetupTLS(mp *m)

osSetupTLS function #

go:nosplit

func osSetupTLS(mp *m)

osStackAlloc function #

func osStackAlloc(s *mspan)

osStackAlloc function #

osStackAlloc performs OS-specific initialization before s is used as stack memory.

func osStackAlloc(s *mspan)

osStackFree function #

osStackFree undoes the effect of osStackAlloc before s is returned to the heap.

func osStackFree(s *mspan)

osStackFree function #

func osStackFree(s *mspan)

osStackRemap function #

func osStackRemap(s *mspan, flags int32)

os_beforeExit function #

os_beforeExit is called from os.Exit(0). go:linkname os_beforeExit os.runtime_beforeExit

func os_beforeExit(exitCode int)

os_runtime_args function #

go:linkname os_runtime_args os.runtime_args

func os_runtime_args() []string

os_sigpipe function #

Do nothing on WASM platform, always return EPIPE to caller. go:linkname os_sigpipe os.sigpipe

func os_sigpipe()

os_sigpipe function #

go:linkname os_sigpipe os.sigpipe

func os_sigpipe()

osinit function #

func osinit()

osinit function #

func osinit()

osinit function #

func osinit()

osinit function #

func osinit()

osinit function #

func osinit()

osinit function #

func osinit()

osinit function #

func osinit()

osinit function #

BSD interface for threading.

func osinit()

osinit function #

func osinit()

osinit function #

func osinit()

osinit function #

func osinit()

osinit_hack function #

osinit_hack is a clumsy hack to work around Apple libc bugs causing fork+exec to hang in the child process intermittently. See go.dev/issue/33565 and go.dev/issue/56784 for a few reports. The stacks obtained from the hung child processes are in libSystem_atfork_child, which is supposed to reinitialize various parts of the C library in the new process. One common stack dies in _notify_fork_child calling _notify_globals (inlined) calling _os_alloc_once, because _os_alloc_once detects that the once lock is held by the parent process and then calls _os_once_gate_corruption_abort. The allocation is setting up the globals for the notification subsystem. See the source code at [1]. To work around this, we can allocate the globals earlier in the Go program's lifetime, before any execs are involved, by calling any notify routine that is exported, calls _notify_globals, and doesn't do anything too expensive otherwise. notify_is_valid_token(0) fits the bill. The other common stack dies in xpc_atfork_child calling _objc_msgSend_uncached which ends up in WAITING_FOR_ANOTHER_THREAD_TO_FINISH_CALLING_+initialize. Of course, whatever thread the child is waiting for is in the parent process and is not going to finish anything in the child process. There is no public source code for these routines, so it is unclear exactly what the problem is. An Apple engineer suggests using xpc_date_create_from_current, which empirically does fix the problem. So osinit_hack_trampoline (in sys_darwin_$GOARCH.s) calls notify_is_valid_token(0) and xpc_date_create_from_current(), which makes the fork+exec hangs stop happening. If Apple fixes the libc bug in some future version of macOS, then we can remove this awful code. go:nosplit

func osinit_hack()

osinit_hack_trampoline function #

func osinit_hack_trampoline()

osyield function #

go:nosplit

func osyield()

osyield function #

go:nosplit

func osyield()

osyield function #

func osyield()

osyield function #

go:nosplit

func osyield()

osyield function #

func osyield()

osyield function #

go:nosplit

func osyield()

osyield function #

func osyield()

osyield function #

go:nosplit

func osyield()

osyield function #

func osyield()

osyield function #

go:nosplit

func osyield()

osyield function #

func osyield()

osyield function #

func osyield()

osyield1 function #

func osyield1()

osyield1 function #

func osyield1()

osyield_no_g function #

go:nosplit

func osyield_no_g()

osyield_no_g function #

go:nosplit

func osyield_no_g()

osyield_no_g function #

go:nosplit

func osyield_no_g()

osyield_no_g function #

go:nosplit

func osyield_no_g()

osyield_no_g function #

go:nosplit

func osyield_no_g()

osyield_no_g function #

go:nosplit

func osyield_no_g()

osyield_no_g function #

go:nosplit

func osyield_no_g()

osyield_no_g function #

go:nosplit

func osyield_no_g()

osyield_no_g function #

go:nosplit

func osyield_no_g()

osyield_no_g function #

go:nosplit

func osyield_no_g()

osyield_no_g function #

go:nosplit

func osyield_no_g()

osyield_no_g function #

go:nosplit

func osyield_no_g()

overLoadFactor function #

overLoadFactor reports whether count items placed in 1&lt;&lt;B buckets is over loadFactor.

func overLoadFactor(count int, B uint8) bool

overflow method #

func (b *bmap) overflow(t *maptype) *bmap

p method #

go:nosplit

func (l dloggerFake) p(x any) dloggerFake

p method #

go:nosplit

func (l *dloggerImpl) p(x any) *dloggerImpl

pack method #

pack returns sc packed into a uint64.

func (sc scavChunkData) pack() uint64

packNetpollKey function #

packNetpollKey creates a key from a source and a tag. Bits that don't fit in the result are discarded.

func packNetpollKey(source uint8, pd *pollDesc) uintptr

packPallocSum function #

packPallocSum takes a start, max, and end value and produces a pallocSum.

func packPallocSum(start uint, max uint, end uint) pallocSum

packUint32 function #

func packUint32(b []byte, v uint32)

pad method #

Add padding of size bytes.

func (h writeUserArenaHeapBits) pad(s *mspan, size uintptr) writeUserArenaHeapBits

pageIndexOf function #

pageIndexOf returns the arena, page index, and page mask for pointer p. The caller must ensure p is in the heap.

func pageIndexOf(p uintptr) (arena *heapArena, pageIdx uintptr, pageMask uint8)

pages64 method #

pages64 returns a 64-bit bitmap representing a block of 64 pages aligned to 64 pages. The returned block of pages is the one containing the i'th page in this pallocBits. Each bit represents whether the page is in-use.

func (b *pallocBits) pages64(i uint) uint64

panicCheck1 function #

Check to make sure we can really generate a panic. If the panic was generated from the runtime, or from inside malloc, then convert to a throw of msg. pc should be the program counter of the compiler-generated code that triggered this panic.

func panicCheck1(pc uintptr, msg string)

panicCheck2 function #

Same as above, but calling from the runtime is allowed. Using this function is necessary for any panic that may be generated by runtime.sigpanic, since those are always called by the runtime.

func panicCheck2(err string)

panicExtendIndex function #

Implemented in assembly, as they take arguments in registers. Declared here to mark them as ABIInternal.

func panicExtendIndex(hi int, lo uint, y int)

panicExtendIndexU function #

func panicExtendIndexU(hi uint, lo uint, y int)

panicExtendSlice3Acap function #

func panicExtendSlice3Acap(hi int, lo uint, y int)

panicExtendSlice3AcapU function #

func panicExtendSlice3AcapU(hi uint, lo uint, y int)

panicExtendSlice3Alen function #

func panicExtendSlice3Alen(hi int, lo uint, y int)

panicExtendSlice3AlenU function #

func panicExtendSlice3AlenU(hi uint, lo uint, y int)

panicExtendSlice3B function #

func panicExtendSlice3B(hi int, lo uint, y int)

panicExtendSlice3BU function #

func panicExtendSlice3BU(hi uint, lo uint, y int)

panicExtendSlice3C function #

func panicExtendSlice3C(hi int, lo uint, y int)

panicExtendSlice3CU function #

func panicExtendSlice3CU(hi uint, lo uint, y int)

panicExtendSliceAcap function #

func panicExtendSliceAcap(hi int, lo uint, y int)

panicExtendSliceAcapU function #

func panicExtendSliceAcapU(hi uint, lo uint, y int)

panicExtendSliceAlen function #

func panicExtendSliceAlen(hi int, lo uint, y int)

panicExtendSliceAlenU function #

func panicExtendSliceAlenU(hi uint, lo uint, y int)

panicExtendSliceB function #

func panicExtendSliceB(hi int, lo uint, y int)

panicExtendSliceBU function #

func panicExtendSliceBU(hi uint, lo uint, y int)

panicIndex function #

Implemented in assembly, as they take arguments in registers. Declared here to mark them as ABIInternal.

func panicIndex(x int, y int)

panicIndexU function #

func panicIndexU(x uint, y int)

panicSlice3Acap function #

func panicSlice3Acap(x int, y int)

panicSlice3AcapU function #

func panicSlice3AcapU(x uint, y int)

panicSlice3Alen function #

func panicSlice3Alen(x int, y int)

panicSlice3AlenU function #

func panicSlice3AlenU(x uint, y int)

panicSlice3B function #

func panicSlice3B(x int, y int)

panicSlice3BU function #

func panicSlice3BU(x uint, y int)

panicSlice3C function #

func panicSlice3C(x int, y int)

panicSlice3CU function #

func panicSlice3CU(x uint, y int)

panicSliceAcap function #

func panicSliceAcap(x int, y int)

panicSliceAcapU function #

func panicSliceAcapU(x uint, y int)

panicSliceAlen function #

func panicSliceAlen(x int, y int)

panicSliceAlenU function #

func panicSliceAlenU(x uint, y int)

panicSliceB function #

func panicSliceB(x int, y int)

panicSliceBU function #

func panicSliceBU(x uint, y int)

panicSliceConvert function #

func panicSliceConvert(x int, y int)

panicdivide function #

go:yeswritebarrierrec

func panicdivide()

panicdottypeE function #

panicdottypeE is called when doing an e.(T) conversion and the conversion fails. have = the dynamic type we have. want = the static type we're trying to convert to. iface = the static type we're converting from.

func panicdottypeE(have *_type, want *_type, iface *_type)

panicdottypeI function #

panicdottypeI is called when doing an i.(T) conversion and the conversion fails. Same args as panicdottypeE, but "have" is the dynamic itab we have.

func panicdottypeI(have *itab, want *_type, iface *_type)

panicfloat function #

func panicfloat()

panicmakeslicecap function #

func panicmakeslicecap()

panicmakeslicelen function #

func panicmakeslicelen()

panicmem function #

func panicmem()

panicmemAddr function #

func panicmemAddr(addr uintptr)

panicnildottype function #

panicnildottype is called when doing an i.(T) conversion and the interface i is nil. want = the static type we're trying to convert to.

func panicnildottype(want *_type)

panicoverflow function #

func panicoverflow()

panicrangestate function #

go:noinline

func panicrangestate(state int)

panicshift function #

go:yeswritebarrierrec

func panicshift()

panicunsafeslicelen function #

func panicunsafeslicelen()

panicunsafeslicelen1 function #

go:yeswritebarrierrec

func panicunsafeslicelen1(pc uintptr)

panicunsafeslicenilptr function #

func panicunsafeslicenilptr()

panicunsafeslicenilptr1 function #

go:yeswritebarrierrec

func panicunsafeslicenilptr1(pc uintptr)

panicunsafestringlen function #

func panicunsafestringlen()

panicunsafestringnilptr function #

func panicunsafestringnilptr()

panicwrap function #

panicwrap generates a panic for a call to a wrapped value method with a nil pointer receiver. It is called from the generated wrapper code.

func panicwrap()

park method #

park parks the scavenger goroutine.

func (s *scavengerState) park()

park_m function #

park continuation on g0.

func park_m(gp *g)

parkunlock_c function #

func parkunlock_c(gp *g, lock unsafe.Pointer) bool

parseByteCount function #

parseByteCount parses a string that represents a count of bytes. s must match the following regular expression: ^[0-9]+(([KMGT]i)?B)?$ In other words, an integer byte count with an optional unit suffix. Acceptable suffixes include one of - KiB, MiB, GiB, TiB which represent binary IEC/ISO 80000 units, or - B, which just represents bytes. Returns an int64 because that's what its callers want and receive, but the result is always non-negative.

func parseByteCount(s string) (int64, bool)

parsedebugvars function #

func parsedebugvars()

parsegodebug function #

parsegodebug parses the godebug string, updating variables listed in dbgvars. If seen == nil, this is startup time and we process the string left to right overwriting older settings with newer ones. If seen != nil, $GODEBUG has changed and we are doing an incremental update. To avoid flapping in the case where a value is set multiple times (perhaps in the default and the environment, or perhaps twice in the environment), we process the string right-to-left and only change values not already seen. After doing this for both the environment and the default settings, the caller must also call cleargodebug(seen) to reset any now-unset values back to their defaults.

func parsegodebug(godebug string, seen map[string]bool)

partialSwept method #

partialSwept returns the spanSet which holds partially-filled swept spans for this sweepgen.

func (c *mcentral) partialSwept(sweepgen uint32) *spanSet

partialUnswept method #

partialUnswept returns the spanSet which holds partially-filled unswept spans for this sweepgen.

func (c *mcentral) partialUnswept(sweepgen uint32) *spanSet

pause function #

pause sets SP to newsp and pauses the execution of Go's WebAssembly code until an event is triggered, or call back into Go. Note: the epilogue of pause pops 8 bytes from the stack, so when returning to the host, the SP is newsp+8. If we want to set the SP such that when it calls back into Go, the Go function appears to be called from pause's caller's caller, then call pause with newsp = internal/runtime/sys.GetCallerSP()-16 (another 8 is the return PC pushed to the stack).

func pause(newsp uintptr)

pause function #

pause is only used on wasm.

func pause(newsp uintptr)

pc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) pc() uintptr

pc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) pc() uint64

pc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) pc() uint32

pc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) pc() uint64

pc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) pc() uint32

pc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) pc() uint64

pc method #

go:nosplit

func (l dloggerFake) pc(x uintptr) dloggerFake

pc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) pc() uint64

pc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) pc() uint64

pc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) pc() uintptr

pc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) pc() uint64

pc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) pc() uint64

pc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) pc() uint64

pc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) pc() uint64

pc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) pc() uint64

pc method #

go:nosplit

func (l *dloggerImpl) pc(x uintptr) *dloggerImpl

pc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) pc() uint32

pc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) pc() uint64

pc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) pc() uint64

pc method #

func (c *sigctxt) pc() uint32

pc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) pc() uint64

pc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) pc() uint32

pc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) pc() uint64

pc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) pc() uintptr

pc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) pc() uint64

pcdatastart function #

func pcdatastart(f funcInfo, table uint32) uint32

pcdatavalue function #

func pcdatavalue(f funcInfo, table uint32, targetpc uintptr) int32

pcdatavalue1 function #

func pcdatavalue1(f funcInfo, table uint32, targetpc uintptr, strict bool) int32

pcdatavalue2 function #

Like pcdatavalue, but also return the start PC of this PCData value.

func pcdatavalue2(f funcInfo, table uint32, targetpc uintptr) (int32, uintptr)

pcvalue function #

Returns the PCData value, and the PC where this value starts.

func pcvalue(f funcInfo, off uint32, targetpc uintptr, strict bool) (int32, uintptr)

pcvalueCacheKey function #

pcvalueCacheKey returns the outermost index in a pcvalueCache to use for targetpc. It must be very cheap to calculate. For now, align to goarch.PtrSize and reduce mod the number of entries. In practice, this appears to be fairly randomly and evenly distributed.

func pcvalueCacheKey(targetpc uintptr) uintptr

peek method #

func (r *debugLogReader) peek() (tick uint64)

persistentalloc function #

Wrapper around sysAlloc that can allocate small chunks. There is no associated free operation. Intended for things like function/type/debug-related persistent data. If align is 0, uses default align (currently 8). The returned memory will be zeroed. sysStat must be non-nil. Consider marking persistentalloc'd types not in heap by embedding internal/runtime/sys.NotInHeap. nosplit because it is used during write barriers and must not be preempted. go:nosplit

func persistentalloc(size uintptr, align uintptr, sysStat *sysMemStat) unsafe.Pointer

persistentalloc1 function #

Must run on system stack because stack growth can (re)invoke it. See issue 9174. go:systemstack

func persistentalloc1(size uintptr, align uintptr, sysStat *sysMemStat) *notInHeap

pidleget function #

pidleget tries to get a p from the _Pidle list, acquiring ownership. sched.lock must be held. May run during STW, so write barriers are not allowed. go:nowritebarrierrec

func pidleget(now int64) (*p, int64)

pidlegetSpinning function #

pidlegetSpinning tries to get a p from the _Pidle list, acquiring ownership. This is called by spinning Ms (or callers that need a spinning M) that have found work. If no P is available, this must be synchronized with non-spinning Ms that may be preparing to drop their P without discovering this work. sched.lock must be held. May run during STW, so write barriers are not allowed. go:nowritebarrierrec

func pidlegetSpinning(now int64) (*p, int64)

pidleput function #

pidleput puts p on the _Pidle list. now must be a relatively recent call to nanotime or zero. Returns now or the current time if now was zero. This releases ownership of p. Once sched.lock is released it is no longer safe to use p. sched.lock must be held. May run during STW, so write barriers are not allowed. go:nowritebarrierrec

func pidleput(pp *p, now int64) int64

pinnerBitSize method #

func (s *mspan) pinnerBitSize() uintptr

pinnerGetPinCounter function #

only for tests

func pinnerGetPinCounter(addr unsafe.Pointer) *uintptr

pinnerGetPtr function #

func pinnerGetPtr(i *any) unsafe.Pointer

pipe function #

func pipe() (r int32, w int32, errno int32)

pipe function #

go:nosplit

func pipe() (r int32, w int32, errno int32)

pipe2 function #

func pipe2(flags int32) (r int32, w int32, errno int32)

pipe2 function #

go:nosplit

func pipe2(flags int32) (r int32, w int32, errno int32)

pipe2 function #

func pipe2(flags int32) (r int32, w int32, errno int32)

pipe2 function #

func pipe2(flags int32) (r int32, w int32, errno int32)

pipe2 function #

func pipe2(flags int32) (r int32, w int32, errno int32)

pipe2 function #

func pipe2(flags int32) (r int32, w int32, errno int32)

pipe2 function #

func pipe2(flags int32) (r int32, w int32, errno int32)

pipe2_trampoline function #

func pipe2_trampoline()

pipe_trampoline function #

func pipe_trampoline()

pkgPath function #

func pkgPath(n name) string

pkgpath method #

pkgpath returns the path of the package where t was defined, if available. This is not the same as the reflect package's PkgPath method, in that it returns the package path for struct and interface types, not just named types.

func (t rtype) pkgpath() string

plan9_semacquire function #

go:noescape

func plan9_semacquire(addr *uint32, block int32) int32

plan9_semrelease function #

go:noescape

func plan9_semrelease(addr *uint32, count int32) int32

plan9_tsemacquire function #

go:noescape

func plan9_tsemacquire(addr *uint32, ms int32) int32

plugin_lastmoduleinit function #

go:linkname plugin_lastmoduleinit plugin.lastmoduleinit

func plugin_lastmoduleinit() (path string, syms map[string]any, initTasks []*initTask, errstr string)

pluginftabverify function #

func pluginftabverify(md *moduledata)

pointer method #

Pointer returns the pointer from a taggedPointer.

func (tp taggedPointer) pointer() unsafe.Pointer

pointer method #

Pointer returns the pointer from a taggedPointer.

func (tp taggedPointer) pointer() unsafe.Pointer

pointerMask function #

Returns GC type info for the pointer stored in ep for testing. If ep points to the stack, only static live information will be returned (i.e. not for objects which are only dynamically live stack objects).

func pointerMask(ep any) (mask []byte)

poll function #

go:nosplit

func poll(pfds *pollfd, npfds uintptr, timeout uintptr) (int32, int32)

pollFractionalWorkerExit function #

pollFractionalWorkerExit reports whether a fractional mark worker should self-preempt. It assumes it is called from the fractional worker.

func pollFractionalWorkerExit() bool

pollOperationFromOverlappedEntry function #

pollOperationFromOverlappedEntry returns the pollOperation contained in e. It can return nil if the entry is not from internal/poll. See go.dev/issue/58870

func pollOperationFromOverlappedEntry(e *overlappedEntry) *pollOperation

pollWork function #

pollWork reports whether there is non-background work this P could be doing. This is a fairly lightweight check to be used for background work loops, like idle GC. It checks a subset of the conditions checked by the actual scheduler.

func pollWork() bool

poll_oneoff function #

go:wasmimport wasi_snapshot_preview1 poll_oneoff go:noescape

func poll_oneoff(in *subscription, out *event, nsubscriptions size, nevents *size) errno

poll_runtime_Semacquire function #

go:linkname poll_runtime_Semacquire internal/poll.runtime_Semacquire

func poll_runtime_Semacquire(addr *uint32)

poll_runtime_Semrelease function #

go:linkname poll_runtime_Semrelease internal/poll.runtime_Semrelease

func poll_runtime_Semrelease(addr *uint32)

poll_runtime_isPollServerDescriptor function #

poll_runtime_isPollServerDescriptor reports whether fd is a descriptor being used by netpoll.

func poll_runtime_isPollServerDescriptor(fd uintptr) bool

poll_runtime_pollClose function #

go:linkname poll_runtime_pollClose internal/poll.runtime_pollClose

func poll_runtime_pollClose(pd *pollDesc)

poll_runtime_pollOpen function #

go:linkname poll_runtime_pollOpen internal/poll.runtime_pollOpen

func poll_runtime_pollOpen(fd uintptr) (*pollDesc, int)

poll_runtime_pollReset function #

poll_runtime_pollReset, which is internal/poll.runtime_pollReset, prepares a descriptor for polling in mode, which is 'r' or 'w'. This returns an error code; the codes are defined above. go:linkname poll_runtime_pollReset internal/poll.runtime_pollReset

func poll_runtime_pollReset(pd *pollDesc, mode int) int

poll_runtime_pollServerInit function #

go:linkname poll_runtime_pollServerInit internal/poll.runtime_pollServerInit

func poll_runtime_pollServerInit()

poll_runtime_pollSetDeadline function #

go:linkname poll_runtime_pollSetDeadline internal/poll.runtime_pollSetDeadline

func poll_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int)

poll_runtime_pollUnblock function #

go:linkname poll_runtime_pollUnblock internal/poll.runtime_pollUnblock

func poll_runtime_pollUnblock(pd *pollDesc)

poll_runtime_pollWait function #

poll_runtime_pollWait, which is internal/poll.runtime_pollWait, waits for a descriptor to be ready for reading or writing, according to mode, which is 'r' or 'w'. This returns an error code; the codes are defined above. go:linkname poll_runtime_pollWait internal/poll.runtime_pollWait

func poll_runtime_pollWait(pd *pollDesc, mode int) int

poll_runtime_pollWaitCanceled function #

go:linkname poll_runtime_pollWaitCanceled internal/poll.runtime_pollWaitCanceled

func poll_runtime_pollWaitCanceled(pd *pollDesc, mode int)

pop method #

pop removes and returns the head of queue q. It returns nil if q is empty.

func (q *gQueue) pop() *g

pop method #

func (q *noteQueue) pop() string

pop method #

pop dequeues from the queue of buffers.

func (q *traceBufQueue) pop() *traceBuf

pop method #

pop removes and returns the head of l. If l is empty, it returns nil.

func (l *gList) pop() *g

pop method #

func (head *lfstack) pop() unsafe.Pointer

pop method #

pop removes and returns a span from buffer b, or nil if b is empty. pop is safe to call concurrently with other pop and push operations.

func (b *spanSet) pop() *mspan

popDefer function #

popDefer pops the head of gp's defer list and frees it.

func popDefer(gp *g)

popList method #

popList takes all Gs in q and returns them as a gList.

func (q *gQueue) popList() gList

popcntRange method #

popcntRange counts the number of set bits in the range [i, i+n).

func (b *pageBits) popcntRange(i uint, n uint) (s uint)

port_alert function #

func port_alert(port int32, flags uint32, events uint32, user uintptr) int32

port_associate function #

func port_associate(port int32, source int32, object uintptr, events uint32, user uintptr) int32

port_create function #

func port_create() int32

port_dissociate function #

func port_dissociate(port int32, source int32, object uintptr) int32

port_getn function #

func port_getn(port int32, evs *portevent, max uint32, nget *uint32, timeout *timespec) int32

position method #

func (enum *randomEnum) position() uint32

postMallocgcDebug function #

func postMallocgcDebug(x unsafe.Pointer, elemsize uintptr, typ *_type)

postnote function #

func postnote(pid uint64, msg []byte) int

pprof_blockProfileInternal function #

go:linkname pprof_blockProfileInternal

func pprof_blockProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok bool)

pprof_cyclesPerSecond function #

runtime/pprof.runtime_cyclesPerSecond should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/grafana/pyroscope-go/godeltaprof - github.com/pyroscope-io/godeltaprof Do not remove or change the type signature. See go.dev/issue/67401. go:linkname pprof_cyclesPerSecond runtime/pprof.runtime_cyclesPerSecond

func pprof_cyclesPerSecond() int64

pprof_fpunwindExpand function #

go:linkname pprof_fpunwindExpand

func pprof_fpunwindExpand(dst []uintptr, src []uintptr) int

pprof_goroutineProfileWithLabels function #

go:linkname pprof_goroutineProfileWithLabels

func pprof_goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool)

pprof_makeProfStack function #

go:linkname pprof_makeProfStack

func pprof_makeProfStack() []uintptr

pprof_memProfileInternal function #

go:linkname pprof_memProfileInternal

func pprof_memProfileInternal(p []profilerecord.MemProfileRecord, inuseZero bool) (n int, ok bool)

pprof_mutexProfileInternal function #

go:linkname pprof_mutexProfileInternal

func pprof_mutexProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok bool)

pprof_threadCreateInternal function #

go:linkname pprof_threadCreateInternal

func pprof_threadCreateInternal(p []profilerecord.StackRecord) (n int, ok bool)

preMallocgcDebug function #

func preMallocgcDebug(size uintptr, typ *_type) unsafe.Pointer

pread function #

go:noescape

func pread(fd int32, buf unsafe.Pointer, nbytes int32, offset int64) int32

preemptM function #

func preemptM(mp *m)

preemptM function #

func preemptM(mp *m)

preemptM function #

func preemptM(mp *m)

preemptM function #

preemptM sends a preemption request to mp. This request may be handled asynchronously and may be coalesced with other requests to the M. When the request is received, if the running G or P are marked for preemption and the goroutine is at an asynchronous safe-point, it will preempt the goroutine. It always atomically increments mp.preemptGen after handling a preemption request.

func preemptM(mp *m)

preemptPark function #

preemptPark parks gp and puts it in _Gpreempted. go:systemstack

func preemptPark(gp *g)

preemptall function #

Tell all goroutines that they have been preempted and they should stop. This function is purely best-effort. It can fail to inform a goroutine if a processor just started running it. No locks need to be held. Returns true if preemption request was issued to at least one goroutine.

func preemptall() bool

preemptone function #

Tell the goroutine running on processor P to stop. This function is purely best-effort. It can incorrectly fail to inform the goroutine. It can inform the wrong goroutine. Even if it informs the correct goroutine, that goroutine might ignore the request if it is simultaneously executing newstack. No lock needs to be held. Returns true if preemption request was issued. The actual preemption will happen at some point in the future and will be indicated by the gp->status no longer being Grunning

func preemptone(pp *p) bool

prepGoExitFrame function #

func prepGoExitFrame(sp uintptr)

prepGoExitFrame function #

func prepGoExitFrame(sp uintptr)

prepareContextForSigResume function #

func prepareContextForSigResume(c *context)

prepareContextForSigResume function #

func prepareContextForSigResume(c *context)

prepareContextForSigResume function #

func prepareContextForSigResume(c *context)

prepareContextForSigResume function #

func prepareContextForSigResume(c *context)

prepareForSweep method #

prepareForSweep flushes c if the system has entered a new sweep phase since c was populated. This must happen between the sweep phase starting and the first allocation from c.

func (c *mcache) prepareForSweep()

prepareFreeWorkbufs function #

prepareFreeWorkbufs moves busy workbuf spans to free list so they can be freed to the heap. This must only be called when all workbufs are on the empty list.

func prepareFreeWorkbufs()

preparePanic method #

preparePanic sets up the stack to look like a call to sigpanic.

func (c *sigctxt) preparePanic(sig uint32, gp *g)

preparePanic method #

preparePanic sets up the stack to look like a call to sigpanic.

func (c *sigctxt) preparePanic(sig uint32, gp *g)

preparePanic method #

preparePanic sets up the stack to look like a call to sigpanic.

func (c *sigctxt) preparePanic(sig uint32, gp *g)

preparePanic method #

preparePanic sets up the stack to look like a call to sigpanic.

func (c *sigctxt) preparePanic(sig uint32, gp *g)

preparePanic method #

preparePanic sets up the stack to look like a call to sigpanic.

func (c *sigctxt) preparePanic(sig uint32, gp *g)

preparePanic method #

preparePanic sets up the stack to look like a call to sigpanic.

func (c *sigctxt) preparePanic(sig uint32, gp *g)

preparePanic method #

preparePanic sets up the stack to look like a call to sigpanic.

func (c *sigctxt) preparePanic(sig uint32, gp *g)

preparePanic method #

preparePanic sets up the stack to look like a call to sigpanic.

func (c *sigctxt) preparePanic(sig uint32, gp *g)

preparePanic method #

preparePanic sets up the stack to look like a call to sigpanic.

func (c *sigctxt) preparePanic(sig uint32, gp *g)

preparePanic method #

preparePanic sets up the stack to look like a call to sigpanic.

func (c *sigctxt) preparePanic(sig uint32, gp *g)

preprintpanics function #

Call all Error and String methods before freezing the world. Used when crashing with panicking.

func preprintpanics(p *_panic)

preventErrorDialogs function #

func preventErrorDialogs()

printAncestorTraceback function #

printAncestorTraceback prints the traceback of the given ancestor. TODO: Unify this with gentraceback and CallersFrames.

func printAncestorTraceback(ancestor ancestorInfo)

printAncestorTracebackFuncInfo function #

printAncestorTracebackFuncInfo prints the given function info at a given pc within an ancestor traceback. The precision of this info is reduced due to only having access to the pcs at the time of the caller goroutine being created.

func printAncestorTracebackFuncInfo(f funcInfo, pc uintptr)

printArgs function #

printArgs prints function arguments in traceback.

func printArgs(f funcInfo, argp unsafe.Pointer, pc uintptr)

printCgoTraceback function #

printCgoTraceback prints a traceback of callers.

func printCgoTraceback(callers *cgoCallers)

printDebugLog function #

printDebugLog prints the debug log.

func printDebugLog()

printDebugLogImpl function #

func printDebugLogImpl()

printDebugLogPC function #

printDebugLogPC prints a single symbolized PC. If returnPC is true, pc is a return PC that must first be converted to a call PC.

func printDebugLogPC(pc uintptr, returnPC bool)

printFuncName function #

printFuncName prints a function name. name is the function name in the binary's func data table.

func printFuncName(name string)

printHeldLocks function #

nosplit to ensure it can be called in as many contexts as possible. go:nosplit

func printHeldLocks(gp *g)

printOneCgoTraceback function #

printOneCgoTraceback prints the traceback of a single cgo caller. This can print more than one line because of inlining. It returns the "stop" result of commitFrame.

func printOneCgoTraceback(pc uintptr, commitFrame func() (pr bool, stop bool), arg *cgoSymbolizerArg) bool

printScavTrace function #

printScavTrace prints a scavenge trace line to standard error. released should be the amount of memory released since the last time this was called, and forced indicates whether the scavenge was forced by the application. scavenger.lock must be held.

func printScavTrace(releasedBg uintptr, releasedEager uintptr, forced bool)

printVal method #

func (r *debugLogReader) printVal() bool

printanycustomtype function #

Invariant: each newline in the string representation is followed by a tab.

func printanycustomtype(i any)

printbool function #

func printbool(v bool)

printcomplex function #

func printcomplex(c complex128)

printcreatedby function #

func printcreatedby(gp *g)

printcreatedby1 function #

func printcreatedby1(f funcInfo, pc uintptr, goid uint64)

printeface function #

func printeface(e eface)

printfloat function #

func printfloat(v float64)

printhex function #

func printhex(v uint64)

printiface function #

func printiface(i iface)

printindented function #

printindented prints s, replacing "\n" with "\n\t".

func printindented(s string)

printint function #

func printint(v int64)

printlock function #

func printlock()

printnl function #

func printnl()

printpanics function #

Print all currently active panics. Used when crashing. Should only be called after preprintpanics.

func printpanics(p *_panic)

printpanicval function #

printpanicval prints an argument passed to panic. If panic is called with a value that has a String or Error method, it has already been converted into a string by preprintpanics. To ensure that the traceback can be unambiguously parsed even when the panic value contains "\ngoroutine" and other stack-like strings, newlines in the string representation of v are replaced by "\n\t".

func printpanicval(v any)

printpointer function #

func printpointer(p unsafe.Pointer)

printslice function #

func printslice(s []byte)

printsp function #

func printsp()

printstring function #

func printstring(s string)

printuint function #

func printuint(v uint64)

printuintptr function #

func printuintptr(p uintptr)

printunlock function #

func printunlock()

procPin function #

procPin should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/gopkg - github.com/choleraehyq/pid - github.com/songzhibin97/gkit Do not remove or change the type signature. See go.dev/issue/67401. go:linkname procPin go:nosplit

func procPin() int

procUnpin function #

procUnpin should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/gopkg - github.com/choleraehyq/pid - github.com/songzhibin97/gkit Do not remove or change the type signature. See go.dev/issue/67401. go:linkname procUnpin go:nosplit

func procUnpin()

proc_regionfilename function #

go:linkname proc_regionfilename runtime/pprof.proc_regionfilename

func proc_regionfilename(pid int, address uint64, buf *byte, buflen int64) int32

proc_regionfilename_trampoline function #

func proc_regionfilename_trampoline()

processWakeupEvent function #

func processWakeupEvent(kq int32, isBlocking bool)

processWakeupEvent function #

func processWakeupEvent(_ int32, isBlocking bool)

procresize function #

Change number of processors. sched.lock must be held, and the world must be stopped. gcworkbufs must not be being modified by either the GC or the write barrier code, so the GC must not be running if the number of Ps actually changes. Returns list of Ps with local work, they need to be scheduled by the caller.

func procresize(nprocs int32) *p

procyield function #

procyield should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/sagernet/sing-tun - github.com/slackhq/nebula - golang.zx2c4.com/wireguard Do not remove or change the type signature. See go.dev/issue/67401. go:linkname procyield

func procyield(cycles uint32)

profileLoop function #

func profileLoop()

profilealloc function #

profilealloc resets the current mcache's nextSample counter and records a memory profile sample. The caller must be non-preemptible and have a P.

func profilealloc(mp *m, x unsafe.Pointer, size uintptr)

profilem function #

func profilem(mp *m, thread uintptr)

progToPointerMask function #

progToPointerMask returns the 1-bit pointer mask output by the GC program prog. size is the size of the region described by prog, in bytes. The resulting bitvector will have no more than size/goarch.PtrSize bits.

func progToPointerMask(prog *byte, size uintptr) bitvector

pstate method #

func (c *sigctxt) pstate() uint64

pthread_attr_destroy function #

go:nosplit go:cgo_unsafe_args

func pthread_attr_destroy(attr *pthreadattr) int32

pthread_attr_destroy function #

go:nosplit

func pthread_attr_destroy(attr *pthread_attr) int32

pthread_attr_destroy function #

func pthread_attr_destroy(attr *pthreadattr) int32

pthread_attr_destroy_trampoline function #

func pthread_attr_destroy_trampoline()

pthread_attr_getstack function #

func pthread_attr_getstack(attr *pthreadattr, addr unsafe.Pointer, size *uint64) int32

pthread_attr_getstacksize function #

go:nosplit go:cgo_unsafe_args

func pthread_attr_getstacksize(attr *pthreadattr, size *uintptr) int32

pthread_attr_getstacksize function #

go:nosplit go:cgo_unsafe_args

func pthread_attr_getstacksize(attr *pthreadattr, size *uintptr) int32

pthread_attr_getstacksize function #

go:nosplit

func pthread_attr_getstacksize(attr *pthread_attr, size *uint64) int32

pthread_attr_getstacksize_trampoline function #

func pthread_attr_getstacksize_trampoline()

pthread_attr_getstacksize_trampoline function #

func pthread_attr_getstacksize_trampoline()

pthread_attr_init function #

func pthread_attr_init(attr *pthreadattr) int32

pthread_attr_init function #

go:nosplit go:cgo_unsafe_args

func pthread_attr_init(attr *pthreadattr) int32

pthread_attr_init function #

go:nosplit go:cgo_unsafe_args

func pthread_attr_init(attr *pthreadattr) int32

pthread_attr_init function #

go:nosplit

func pthread_attr_init(attr *pthread_attr) int32

pthread_attr_init1 function #

func pthread_attr_init1(attr uintptr) int32

pthread_attr_init_trampoline function #

func pthread_attr_init_trampoline()

pthread_attr_init_trampoline function #

func pthread_attr_init_trampoline()

pthread_attr_setdetachstate function #

go:nosplit go:cgo_unsafe_args

func pthread_attr_setdetachstate(attr *pthreadattr, state int) int32

pthread_attr_setdetachstate function #

go:nosplit go:cgo_unsafe_args

func pthread_attr_setdetachstate(attr *pthreadattr, state int) int32

pthread_attr_setdetachstate function #

go:nosplit

func pthread_attr_setdetachstate(attr *pthread_attr, state int32) int32

pthread_attr_setdetachstate function #

func pthread_attr_setdetachstate(attr *pthreadattr, state int32) int32

pthread_attr_setdetachstate1 function #

func pthread_attr_setdetachstate1(attr uintptr, state int32) int32

pthread_attr_setdetachstate_trampoline function #

func pthread_attr_setdetachstate_trampoline()

pthread_attr_setdetachstate_trampoline function #

func pthread_attr_setdetachstate_trampoline()

pthread_attr_setstack function #

func pthread_attr_setstack(attr *pthreadattr, addr uintptr, size uint64) int32

pthread_attr_setstackaddr function #

go:nosplit

func pthread_attr_setstackaddr(attr *pthread_attr, stk unsafe.Pointer) int32

pthread_attr_setstacksize function #

go:nosplit

func pthread_attr_setstacksize(attr *pthread_attr, size uint64) int32

pthread_attr_setstacksize1 function #

func pthread_attr_setstacksize1(attr uintptr, size uint64) int32

pthread_cond_init function #

go:nosplit go:cgo_unsafe_args

func pthread_cond_init(c *pthreadcond, attr *pthreadcondattr) int32

pthread_cond_init_trampoline function #

func pthread_cond_init_trampoline()

pthread_cond_signal function #

go:nosplit go:cgo_unsafe_args

func pthread_cond_signal(c *pthreadcond) int32

pthread_cond_signal_trampoline function #

func pthread_cond_signal_trampoline()

pthread_cond_timedwait_relative_np function #

go:nosplit go:cgo_unsafe_args

func pthread_cond_timedwait_relative_np(c *pthreadcond, m *pthreadmutex, t *timespec) int32

pthread_cond_timedwait_relative_np_trampoline function #

func pthread_cond_timedwait_relative_np_trampoline()

pthread_cond_wait function #

go:nosplit go:cgo_unsafe_args

func pthread_cond_wait(c *pthreadcond, m *pthreadmutex) int32

pthread_cond_wait_trampoline function #

func pthread_cond_wait_trampoline()

pthread_create function #

go:nosplit go:cgo_unsafe_args

func pthread_create(attr *pthreadattr, start uintptr, arg unsafe.Pointer) int32

pthread_create function #

go:nosplit

func pthread_create(tid *pthread, attr *pthread_attr, fn *funcDescriptor, arg unsafe.Pointer) int32

pthread_create function #

func pthread_create(thread *pthread, attr *pthreadattr, fn uintptr, arg unsafe.Pointer) int32

pthread_create function #

go:nosplit go:cgo_unsafe_args

func pthread_create(attr *pthreadattr, start uintptr, arg unsafe.Pointer) int32

pthread_create1 function #

func pthread_create1(tid uintptr, attr uintptr, fn uintptr, arg uintptr) int32

pthread_create_trampoline function #

func pthread_create_trampoline()

pthread_create_trampoline function #

func pthread_create_trampoline()

pthread_key_create_trampoline function #

func pthread_key_create_trampoline()

pthread_kill function #

go:nosplit go:cgo_unsafe_args

func pthread_kill(t pthread, sig uint32)

pthread_kill_trampoline function #

func pthread_kill_trampoline()

pthread_mutex_init function #

go:nosplit go:cgo_unsafe_args

func pthread_mutex_init(m *pthreadmutex, attr *pthreadmutexattr) int32

pthread_mutex_init_trampoline function #

func pthread_mutex_init_trampoline()

pthread_mutex_lock function #

go:nosplit go:cgo_unsafe_args

func pthread_mutex_lock(m *pthreadmutex) int32

pthread_mutex_lock_trampoline function #

func pthread_mutex_lock_trampoline()

pthread_mutex_unlock function #

go:nosplit go:cgo_unsafe_args

func pthread_mutex_unlock(m *pthreadmutex) int32

pthread_mutex_unlock_trampoline function #

func pthread_mutex_unlock_trampoline()

pthread_self function #

go:nosplit

func pthread_self() pthread

pthread_self function #

go:nosplit go:cgo_unsafe_args

func pthread_self() (t pthread)

pthread_self function #

func pthread_self() pthread

pthread_self_trampoline function #

func pthread_self_trampoline()

pthread_setspecific_trampoline function #

func pthread_setspecific_trampoline()

ptr method #

go:nosplit

func (pp puintptr) ptr() *p

ptr method #

go:nosplit

func (mp muintptr) ptr() *m

ptr method #

go:nosplit

func (gp guintptr) ptr() *g

ptr method #

func (p memHdrPtr) ptr() *memHdr

ptr method #

ptr returns the *gclink form of p. The result should be used for accessing fields, not stored in other data structures.

func (p gclinkptr) ptr() *gclink

ptrbit method #

ptrbit returns the i'th bit in bv. ptrbit is less efficient than iterating directly over bitvector bits, and should only be used in non-performance-critical code. See adjustpointers for an example of a high-efficiency walk of a bitvector.

func (bv *bitvector) ptrbit(i uintptr) uint8

publicationBarrier function #

publicationBarrier performs a store/store barrier (a "publication" or "export" barrier). Some form of synchronization is required between initializing an object and making that object accessible to another processor. Without synchronization, the initialization writes and the "publication" write may be reordered, allowing the other processor to follow the pointer and observe an uninitialized object. In general, higher-level synchronization should be used, such as locking or an atomic pointer write. publicationBarrier is for when those aren't an option, such as in the implementation of the memory manager. There's no corresponding barrier for the read side because the read side naturally has a data dependency order. All architectures that Go supports or seems likely to ever support automatically enforce data dependency ordering.

func publicationBarrier()

publishInfo method #

publishInfo updates pd.atomicInfo (returned by pd.info) using the other values in pd. It must be called while holding pd.lock, and it must be called after changing anything that might affect the info bits. In practice this means after changing closing or changing rd or wd from < 0 to >= 0.

func (pd *pollDesc) publishInfo()

push method #

It is not allowed to allocate memory in the signal handler.

func (q *noteQueue) push(item *byte) bool

push method #

push adds gp to the head of l.

func (l *gList) push(gp *g)

push method #

push adds gp to the head of q.

func (q *gQueue) push(gp *g)

push method #

func (head *lfstack) push(node *lfnode)

push method #

push queues buf into the queue of buffers.

func (q *traceBufQueue) push(buf *traceBuf)

push method #

push adds span s to buffer b. push is safe to call concurrently with other push and pop operations.

func (b *spanSet) push(s *mspan)

pushAll method #

pushAll prepends all Gs in q to l.

func (l *gList) pushAll(q gQueue)

pushBack method #

pushBack adds gp to the tail of q.

func (q *gQueue) pushBack(gp *g)

pushBackAll method #

pushBackAll adds all Gs in q2 to the tail of q. After this q2 must not be used.

func (q *gQueue) pushBackAll(q2 gQueue)

pushCall method #

func (c *sigctxt) pushCall(targetPC uintptr, resumePC uintptr)

pushCall method #

func (c *sigctxt) pushCall(targetPC uintptr, resumePC uintptr)

pushCall method #

func (c *sigctxt) pushCall(targetPC uintptr, resumePC uintptr)

pushCall method #

func (c *sigctxt) pushCall(targetPC uintptr, resumePC uintptr)

pushCall method #

func (c *sigctxt) pushCall(targetPC uintptr, resumePC uintptr)

pushCall method #

func (c *sigctxt) pushCall(targetPC uintptr, resumePC uintptr)

pushCall method #

func (c *sigctxt) pushCall(targetPC uintptr, resumePC uintptr)

pushCall method #

func (c *sigctxt) pushCall(targetPC uintptr, resumePC uintptr)

pushCall method #

func (c *sigctxt) pushCall(targetPC uintptr, resumePC uintptr)

pushCall method #

func (c *sigctxt) pushCall(targetPC uintptr, resumePC uintptr)

put method #

put returns a unique id for the type typ and caches it in the table, if the table is seeing typ for the first time. N.B. typ must be kept alive forever for this to work correctly.

func (t *traceTypeTable) put(typ *abi.Type) uint64

put method #

put adds a string to the table, emits it, and returns a unique ID for it.

func (t *traceStringTable) put(gen uintptr, s string) uint64

put method #

put inserts the data into the table. It's always safe for callers to noescape data because put copies its bytes. Returns a unique ID for the data and whether this is the first time the data has been added to the map.

func (tab *traceMap) put(data unsafe.Pointer, size uintptr) (uint64, bool)

put method #

put returns a unique id for the stack trace pcs and caches it in the table, if it sees the trace for the first time.

func (t *traceStackTable) put(pcs []uintptr) uint64

put method #

put enqueues a pointer for the garbage collector to trace. obj must point to the beginning of a heap object or an oblet. go:nowritebarrierrec

func (w *gcWork) put(obj uintptr)

putBatch method #

putBatch performs a put on every pointer in obj. See put for constraints on these pointers. go:nowritebarrierrec

func (w *gcWork) putBatch(obj []uintptr)

putCachedDlogger function #

putCachedDlogger attempts to return l to the local cache. It returns false if this fails.

func putCachedDlogger(l *dloggerImpl) bool

putCachedDlogger function #

func putCachedDlogger(l *dloggerImpl) bool

putExtraM function #

Returns an extra M back to the list. mp must be from getExtraM. Newly allocated M's should use addExtraM. go:nosplit

func putExtraM(mp *m)

putFast method #

putFast does a put and reports whether it can be done quickly; otherwise it returns false and the caller needs to call put. go:nowritebarrierrec

func (w *gcWork) putFast(obj uintptr) bool

putPtr method #

Add p as a potential pointer to a stack object. p must be a stack address.

func (s *stackScanState) putPtr(p uintptr, conservative bool)

putempty function #

putempty puts a workbuf onto the work.empty list. Upon entry this goroutine owns b. The lfstack.push relinquishes ownership. go:nowritebarrier

func putempty(b *workbuf)

putfull function #

putfull puts the workbuf on the work.full list for the GC. putfull accepts partially full buffers so the GC can avoid competing with the mutators for ownership of partially full buffers. go:nowritebarrier

func putfull(b *workbuf)

pwrite function #

go:noescape

func pwrite(fd int32, buf unsafe.Pointer, nbytes int32, offset int64) int32

queue method #

queue adds s to the blocked goroutines in semaRoot.

func (root *semaRoot) queue(addr *uint32, s *sudog, lifo bool)

queuefinalizer function #

func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot *ptrtype)

r0 method #

func (c *sigctxt) r0() uint64

r0 method #

func (c *sigctxt) r0() uint64

r0 method #

func (c *sigctxt) r0() uint32

r0 method #

func (c *sigctxt) r0() uint64

r0 method #

func (c *sigctxt) r0() uint32

r0 method #

func (c *sigctxt) r0() uint64

r0 method #

func (c *sigctxt) r0() uint64

r0 method #

func (c *sigctxt) r0() uint64

r0 method #

func (c *sigctxt) r0() uint32

r0 method #

func (c *sigctxt) r0() uint64

r0 method #

func (c *sigctxt) r0() uint64

r0 method #

func (c *sigctxt) r0() uint64

r0 method #

func (c *sigctxt) r0() uint32

r0 method #

func (c *sigctxt) r0() uint64

r0 method #

func (c *sigctxt) r0() uint32

r0 method #

func (c *sigctxt) r0() uint64

r0 method #

func (c *sigctxt) r0() uint64

r1 method #

func (c *sigctxt) r1() uint32

r1 method #

func (c *sigctxt) r1() uint64

r1 method #

func (c *sigctxt) r1() uint64

r1 method #

func (c *sigctxt) r1() uint64

r1 method #

func (c *sigctxt) r1() uint64

r1 method #

func (c *sigctxt) r1() uint32

r1 method #

func (c *sigctxt) r1() uint32

r1 method #

func (c *sigctxt) r1() uint32

r1 method #

func (c *sigctxt) r1() uint64

r1 method #

func (c *sigctxt) r1() uint64

r1 method #

func (c *sigctxt) r1() uint64

r1 method #

func (c *sigctxt) r1() uint64

r1 method #

func (c *sigctxt) r1() uint64

r1 method #

func (c *sigctxt) r1() uint64

r1 method #

func (c *sigctxt) r1() uint64

r1 method #

func (c *sigctxt) r1() uint32

r1 method #

func (c *sigctxt) r1() uint64

r10 method #

func (c *sigctxt) r10() uint64

r10 method #

func (c *sigctxt) r10() uint64

r10 method #

func (c *sigctxt) r10() uint64

r10 method #

func (c *sigctxt) r10() uint64

r10 method #

func (c *sigctxt) r10() uint32

r10 method #

func (c *sigctxt) r10() uint64

r10 method #

func (c *sigctxt) r10() uint64

r10 method #

func (c *sigctxt) r10() uint64

r10 method #

func (c *sigctxt) r10() uint64

r10 method #

func (c *sigctxt) r10() uint64

r10 method #

func (c *sigctxt) r10() uint64

r10 method #

func (c *sigctxt) r10() uint32

r10 method #

func (c *sigctxt) r10() uint64

r10 method #

func (c *sigctxt) r10() uint64

r10 method #

func (c *sigctxt) r10() uint64

r10 method #

func (c *sigctxt) r10() uint32

r10 method #

func (c *sigctxt) r10() uint64

r10 method #

func (c *sigctxt) r10() uint64

r10 method #

func (c *sigctxt) r10() uint32

r10 method #

func (c *sigctxt) r10() uint64

r10 method #

func (c *sigctxt) r10() uint64

r10 method #

func (c *sigctxt) r10() uint32

r10 method #

func (c *sigctxt) r10() uint64

r10 method #

func (c *sigctxt) r10() uint64

r11 method #

func (c *sigctxt) r11() uint64

r11 method #

func (c *sigctxt) r11() uint64

r11 method #

func (c *sigctxt) r11() uint64

r11 method #

func (c *sigctxt) r11() uint64

r11 method #

func (c *sigctxt) r11() uint64

r11 method #

func (c *sigctxt) r11() uint64

r11 method #

func (c *sigctxt) r11() uint64

r11 method #

func (c *sigctxt) r11() uint64

r11 method #

func (c *sigctxt) r11() uint64

r11 method #

func (c *sigctxt) r11() uint64

r11 method #

func (c *sigctxt) r11() uint64

r11 method #

func (c *sigctxt) r11() uint64

r11 method #

func (c *sigctxt) r11() uint64

r11 method #

func (c *sigctxt) r11() uint64

r11 method #

func (c *sigctxt) r11() uint32

r11 method #

func (c *sigctxt) r11() uint64

r11 method #

func (c *sigctxt) r11() uint64

r11 method #

func (c *sigctxt) r11() uint64

r11 method #

func (c *sigctxt) r11() uint64

r11 method #

func (c *sigctxt) r11() uint64

r12 method #

func (c *sigctxt) r12() uint64

r12 method #

func (c *sigctxt) r12() uint64

r12 method #

func (c *sigctxt) r12() uint64

r12 method #

func (c *sigctxt) r12() uint64

r12 method #

func (c *sigctxt) r12() uint64

r12 method #

func (c *sigctxt) r12() uint64

r12 method #

func (c *sigctxt) r12() uint64

r12 method #

func (c *sigctxt) r12() uint64

r12 method #

func (c *sigctxt) r12() uint64

r12 method #

func (c *sigctxt) r12() uint64

r12 method #

func (c *sigctxt) r12() uint64

r12 method #

func (c *sigctxt) r12() uint64

r12 method #

func (c *sigctxt) r12() uint64

r12 method #

func (c *sigctxt) r12() uint64

r12 method #

func (c *sigctxt) r12() uint64

r12 method #

func (c *sigctxt) r12() uint64

r12 method #

func (c *sigctxt) r12() uint32

r12 method #

func (c *sigctxt) r12() uint64

r12 method #

func (c *sigctxt) r12() uint64

r12 method #

func (c *sigctxt) r12() uint64

r13 method #

func (c *sigctxt) r13() uint64

r13 method #

func (c *sigctxt) r13() uint64

r13 method #

func (c *sigctxt) r13() uint64

r13 method #

func (c *sigctxt) r13() uint64

r13 method #

func (c *sigctxt) r13() uint64

r13 method #

func (c *sigctxt) r13() uint64

r13 method #

func (c *sigctxt) r13() uint64

r13 method #

func (c *sigctxt) r13() uint64

r13 method #

func (c *sigctxt) r13() uint64

r13 method #

func (c *sigctxt) r13() uint32

r13 method #

func (c *sigctxt) r13() uint64

r13 method #

func (c *sigctxt) r13() uint64

r13 method #

func (c *sigctxt) r13() uint64

r13 method #

func (c *sigctxt) r13() uint64

r13 method #

func (c *sigctxt) r13() uint64

r13 method #

func (c *sigctxt) r13() uint64

r13 method #

func (c *sigctxt) r13() uint64

r13 method #

func (c *sigctxt) r13() uint64

r13 method #

func (c *sigctxt) r13() uint64

r13 method #

func (c *sigctxt) r13() uint64

r14 method #

func (c *sigctxt) r14() uint64

r14 method #

func (c *sigctxt) r14() uint64

r14 method #

func (c *sigctxt) r14() uint64

r14 method #

func (c *sigctxt) r14() uint64

r14 method #

func (c *sigctxt) r14() uint64

r14 method #

func (c *sigctxt) r14() uint64

r14 method #

func (c *sigctxt) r14() uint64

r14 method #

func (c *sigctxt) r14() uint64

r14 method #

func (c *sigctxt) r14() uint64

r14 method #

func (c *sigctxt) r14() uint64

r14 method #

func (c *sigctxt) r14() uint64

r14 method #

func (c *sigctxt) r14() uint64

r14 method #

func (c *sigctxt) r14() uint64

r14 method #

func (c *sigctxt) r14() uint32

r14 method #

func (c *sigctxt) r14() uint64

r14 method #

func (c *sigctxt) r14() uint64

r14 method #

func (c *sigctxt) r14() uint64

r14 method #

func (c *sigctxt) r14() uint64

r14 method #

func (c *sigctxt) r14() uint64

r14 method #

func (c *sigctxt) r14() uint64

r15 method #

func (c *sigctxt) r15() uint64

r15 method #

func (c *sigctxt) r15() uint64

r15 method #

func (c *sigctxt) r15() uint64

r15 method #

func (c *sigctxt) r15() uint64

r15 method #

func (c *sigctxt) r15() uint64

r15 method #

func (c *sigctxt) r15() uint64

r15 method #

func (c *sigctxt) r15() uint64

r15 method #

func (c *sigctxt) r15() uint64

r15 method #

func (c *sigctxt) r15() uint64

r15 method #

func (c *sigctxt) r15() uint32

r15 method #

func (c *sigctxt) r15() uint64

r15 method #

func (c *sigctxt) r15() uint64

r15 method #

func (c *sigctxt) r15() uint64

r15 method #

func (c *sigctxt) r15() uint64

r15 method #

func (c *sigctxt) r15() uint64

r15 method #

func (c *sigctxt) r15() uint64

r15 method #

func (c *sigctxt) r15() uint64

r15 method #

func (c *sigctxt) r15() uint64

r15 method #

func (c *sigctxt) r15() uint64

r15 method #

func (c *sigctxt) r15() uint64

r16 method #

func (c *sigctxt) r16() uint64

r16 method #

func (c *sigctxt) r16() uint64

r16 method #

func (c *sigctxt) r16() uint64

r16 method #

func (c *sigctxt) r16() uint32

r16 method #

func (c *sigctxt) r16() uint64

r16 method #

func (c *sigctxt) r16() uint64

r16 method #

func (c *sigctxt) r16() uint64

r16 method #

func (c *sigctxt) r16() uint64

r16 method #

func (c *sigctxt) r16() uint64

r16 method #

func (c *sigctxt) r16() uint64

r16 method #

func (c *sigctxt) r16() uint64

r16 method #

func (c *sigctxt) r16() uint64

r17 method #

func (c *sigctxt) r17() uint64

r17 method #

func (c *sigctxt) r17() uint64

r17 method #

func (c *sigctxt) r17() uint64

r17 method #

func (c *sigctxt) r17() uint64

r17 method #

func (c *sigctxt) r17() uint64

r17 method #

func (c *sigctxt) r17() uint64

r17 method #

func (c *sigctxt) r17() uint64

r17 method #

func (c *sigctxt) r17() uint64

r17 method #

func (c *sigctxt) r17() uint64

r17 method #

func (c *sigctxt) r17() uint32

r17 method #

func (c *sigctxt) r17() uint64

r17 method #

func (c *sigctxt) r17() uint64

r18 method #

func (c *sigctxt) r18() uint64

r18 method #

func (c *sigctxt) r18() uint64

r18 method #

func (c *sigctxt) r18() uint32

r18 method #

func (c *sigctxt) r18() uint64

r18 method #

func (c *sigctxt) r18() uint64

r18 method #

func (c *sigctxt) r18() uint64

r18 method #

func (c *sigctxt) r18() uint64

r18 method #

func (c *sigctxt) r18() uint64

r18 method #

func (c *sigctxt) r18() uint64

r18 method #

func (c *sigctxt) r18() uint64

r18 method #

func (c *sigctxt) r18() uint64

r18 method #

func (c *sigctxt) r18() uint64

r19 method #

func (c *sigctxt) r19() uint32

r19 method #

func (c *sigctxt) r19() uint64

r19 method #

func (c *sigctxt) r19() uint64

r19 method #

func (c *sigctxt) r19() uint64

r19 method #

func (c *sigctxt) r19() uint64

r19 method #

func (c *sigctxt) r19() uint64

r19 method #

func (c *sigctxt) r19() uint64

r19 method #

func (c *sigctxt) r19() uint64

r19 method #

func (c *sigctxt) r19() uint64

r19 method #

func (c *sigctxt) r19() uint64

r19 method #

func (c *sigctxt) r19() uint64

r19 method #

func (c *sigctxt) r19() uint64

r2 method #

func (c *sigctxt) r2() uint64

r2 method #

func (c *sigctxt) r2() uint32

r2 method #

func (c *sigctxt) r2() uint64

r2 method #

func (c *sigctxt) r2() uint64

r2 method #

func (c *sigctxt) r2() uint64

r2 method #

func (c *sigctxt) r2() uint32

r2 method #

func (c *sigctxt) r2() uint64

r2 method #

func (c *sigctxt) r2() uint64

r2 method #

func (c *sigctxt) r2() uint32

r2 method #

func (c *sigctxt) r2() uint64

r2 method #

func (c *sigctxt) r2() uint64

r2 method #

func (c *sigctxt) r2() uint64

r2 method #

func (c *sigctxt) r2() uint64

r2 method #

func (c *sigctxt) r2() uint32

r2 method #

func (c *sigctxt) r2() uint64

r2 method #

func (c *sigctxt) r2() uint32

r2 method #

func (c *sigctxt) r2() uint64

r20 method #

func (c *sigctxt) r20() uint64

r20 method #

func (c *sigctxt) r20() uint64

r20 method #

func (c *sigctxt) r20() uint64

r20 method #

func (c *sigctxt) r20() uint64

r20 method #

func (c *sigctxt) r20() uint64

r20 method #

func (c *sigctxt) r20() uint64

r20 method #

func (c *sigctxt) r20() uint32

r20 method #

func (c *sigctxt) r20() uint64

r20 method #

func (c *sigctxt) r20() uint64

r20 method #

func (c *sigctxt) r20() uint64

r20 method #

func (c *sigctxt) r20() uint64

r20 method #

func (c *sigctxt) r20() uint64

r21 method #

func (c *sigctxt) r21() uint32

r21 method #

func (c *sigctxt) r21() uint64

r21 method #

func (c *sigctxt) r21() uint64

r21 method #

func (c *sigctxt) r21() uint64

r21 method #

func (c *sigctxt) r21() uint64

r21 method #

func (c *sigctxt) r21() uint64

r21 method #

func (c *sigctxt) r21() uint64

r21 method #

func (c *sigctxt) r21() uint64

r21 method #

func (c *sigctxt) r21() uint64

r21 method #

func (c *sigctxt) r21() uint64

r21 method #

func (c *sigctxt) r21() uint64

r21 method #

func (c *sigctxt) r21() uint64

r22 method #

func (c *sigctxt) r22() uint64

r22 method #

func (c *sigctxt) r22() uint32

r22 method #

func (c *sigctxt) r22() uint64

r22 method #

func (c *sigctxt) r22() uint64

r22 method #

func (c *sigctxt) r22() uint64

r22 method #

func (c *sigctxt) r22() uint64

r22 method #

func (c *sigctxt) r22() uint64

r22 method #

func (c *sigctxt) r22() uint64

r22 method #

func (c *sigctxt) r22() uint64

r22 method #

func (c *sigctxt) r22() uint64

r22 method #

func (c *sigctxt) r22() uint64

r22 method #

func (c *sigctxt) r22() uint64

r23 method #

func (c *sigctxt) r23() uint64

r23 method #

func (c *sigctxt) r23() uint64

r23 method #

func (c *sigctxt) r23() uint64

r23 method #

func (c *sigctxt) r23() uint64

r23 method #

func (c *sigctxt) r23() uint64

r23 method #

func (c *sigctxt) r23() uint64

r23 method #

func (c *sigctxt) r23() uint64

r23 method #

func (c *sigctxt) r23() uint64

r23 method #

func (c *sigctxt) r23() uint64

r23 method #

func (c *sigctxt) r23() uint32

r23 method #

func (c *sigctxt) r23() uint64

r23 method #

func (c *sigctxt) r23() uint64

r24 method #

func (c *sigctxt) r24() uint64

r24 method #

func (c *sigctxt) r24() uint32

r24 method #

func (c *sigctxt) r24() uint64

r24 method #

func (c *sigctxt) r24() uint64

r24 method #

func (c *sigctxt) r24() uint64

r24 method #

func (c *sigctxt) r24() uint64

r24 method #

func (c *sigctxt) r24() uint64

r24 method #

func (c *sigctxt) r24() uint64

r24 method #

func (c *sigctxt) r24() uint64

r24 method #

func (c *sigctxt) r24() uint64

r24 method #

func (c *sigctxt) r24() uint64

r24 method #

func (c *sigctxt) r24() uint64

r25 method #

func (c *sigctxt) r25() uint64

r25 method #

func (c *sigctxt) r25() uint64

r25 method #

func (c *sigctxt) r25() uint64

r25 method #

func (c *sigctxt) r25() uint64

r25 method #

func (c *sigctxt) r25() uint64

r25 method #

func (c *sigctxt) r25() uint64

r25 method #

func (c *sigctxt) r25() uint64

r25 method #

func (c *sigctxt) r25() uint64

r25 method #

func (c *sigctxt) r25() uint64

r25 method #

func (c *sigctxt) r25() uint64

r25 method #

func (c *sigctxt) r25() uint64

r25 method #

func (c *sigctxt) r25() uint32

r26 method #

func (c *sigctxt) r26() uint64

r26 method #

func (c *sigctxt) r26() uint64

r26 method #

func (c *sigctxt) r26() uint64

r26 method #

func (c *sigctxt) r26() uint64

r26 method #

func (c *sigctxt) r26() uint64

r26 method #

func (c *sigctxt) r26() uint32

r26 method #

func (c *sigctxt) r26() uint64

r26 method #

func (c *sigctxt) r26() uint64

r26 method #

func (c *sigctxt) r26() uint64

r26 method #

func (c *sigctxt) r26() uint64

r26 method #

func (c *sigctxt) r26() uint64

r26 method #

func (c *sigctxt) r26() uint64

r27 method #

func (c *sigctxt) r27() uint64

r27 method #

func (c *sigctxt) r27() uint64

r27 method #

func (c *sigctxt) r27() uint64

r27 method #

func (c *sigctxt) r27() uint64

r27 method #

func (c *sigctxt) r27() uint64

r27 method #

func (c *sigctxt) r27() uint64

r27 method #

func (c *sigctxt) r27() uint64

r27 method #

func (c *sigctxt) r27() uint64

r27 method #

func (c *sigctxt) r27() uint64

r27 method #

func (c *sigctxt) r27() uint64

r27 method #

func (c *sigctxt) r27() uint32

r27 method #

func (c *sigctxt) r27() uint64

r28 method #

func (c *sigctxt) r28() uint64

r28 method #

func (c *sigctxt) r28() uint64

r28 method #

func (c *sigctxt) r28() uint64

r28 method #

func (c *sigctxt) r28() uint32

r28 method #

func (c *sigctxt) r28() uint64

r28 method #

func (c *sigctxt) r28() uint64

r28 method #

func (c *sigctxt) r28() uint64

r28 method #

func (c *sigctxt) r28() uint64

r28 method #

func (c *sigctxt) r28() uint64

r28 method #

func (c *sigctxt) r28() uint64

r28 method #

func (c *sigctxt) r28() uint64

r28 method #

func (c *sigctxt) r28() uint64

r29 method #

func (c *sigctxt) r29() uint64

r29 method #

func (c *sigctxt) r29() uint64

r29 method #

func (c *sigctxt) r29() uint64

r29 method #

func (c *sigctxt) r29() uint64

r29 method #

func (c *sigctxt) r29() uint64

r29 method #

func (c *sigctxt) r29() uint64

r29 method #

func (c *sigctxt) r29() uint64

r29 method #

func (c *sigctxt) r29() uint64

r29 method #

func (c *sigctxt) r29() uint64

r29 method #

func (c *sigctxt) r29() uint32

r29 method #

func (c *sigctxt) r29() uint64

r29 method #

func (c *sigctxt) r29() uint64

r3 method #

func (c *sigctxt) r3() uint64

r3 method #

func (c *sigctxt) r3() uint64

r3 method #

func (c *sigctxt) r3() uint32

r3 method #

func (c *sigctxt) r3() uint64

r3 method #

func (c *sigctxt) r3() uint64

r3 method #

func (c *sigctxt) r3() uint64

r3 method #

func (c *sigctxt) r3() uint32

r3 method #

func (c *sigctxt) r3() uint64

r3 method #

func (c *sigctxt) r3() uint64

r3 method #

func (c *sigctxt) r3() uint64

r3 method #

func (c *sigctxt) r3() uint32

r3 method #

func (c *sigctxt) r3() uint32

r3 method #

func (c *sigctxt) r3() uint64

r3 method #

func (c *sigctxt) r3() uint64

r3 method #

func (c *sigctxt) r3() uint64

r3 method #

func (c *sigctxt) r3() uint64

r3 method #

func (c *sigctxt) r3() uint32

r30 method #

func (c *sigctxt) r30() uint64

r30 method #

func (c *sigctxt) r30() uint64

r30 method #

func (c *sigctxt) r30() uint64

r30 method #

func (c *sigctxt) r30() uint64

r30 method #

func (c *sigctxt) r30() uint64

r30 method #

func (c *sigctxt) r30() uint32

r30 method #

func (c *sigctxt) r30() uint64

r31 method #

func (c *sigctxt) r31() uint64

r31 method #

func (c *sigctxt) r31() uint64

r31 method #

func (c *sigctxt) r31() uint64

r31 method #

func (c *sigctxt) r31() uint64

r31 method #

func (c *sigctxt) r31() uint64

r31 method #

func (c *sigctxt) r31() uint32

r31 method #

func (c *sigctxt) r31() uint64

r4 method #

func (c *sigctxt) r4() uint64

r4 method #

func (c *sigctxt) r4() uint64

r4 function #

func r4(p unsafe.Pointer) uintptr

r4 method #

func (c *sigctxt) r4() uint64

r4 method #

func (c *sigctxt) r4() uint32

r4 method #

func (c *sigctxt) r4() uint32

r4 method #

func (c *sigctxt) r4() uint32

r4 method #

func (c *sigctxt) r4() uint32

r4 method #

func (c *sigctxt) r4() uint64

r4 method #

func (c *sigctxt) r4() uint64

r4 method #

func (c *sigctxt) r4() uint64

r4 method #

func (c *sigctxt) r4() uint64

r4 method #

func (c *sigctxt) r4() uint64

r4 method #

func (c *sigctxt) r4() uint64

r4 method #

func (c *sigctxt) r4() uint64

r4 method #

func (c *sigctxt) r4() uint32

r4 method #

func (c *sigctxt) r4() uint64

r4 method #

func (c *sigctxt) r4() uint64

r5 method #

func (c *sigctxt) r5() uint64

r5 method #

func (c *sigctxt) r5() uint32

r5 method #

func (c *sigctxt) r5() uint32

r5 method #

func (c *sigctxt) r5() uint64

r5 method #

func (c *sigctxt) r5() uint64

r5 method #

func (c *sigctxt) r5() uint64

r5 method #

func (c *sigctxt) r5() uint64

r5 method #

func (c *sigctxt) r5() uint32

r5 method #

func (c *sigctxt) r5() uint32

r5 method #

func (c *sigctxt) r5() uint64

r5 method #

func (c *sigctxt) r5() uint64

r5 method #

func (c *sigctxt) r5() uint64

r5 method #

func (c *sigctxt) r5() uint32

r5 method #

func (c *sigctxt) r5() uint64

r5 method #

func (c *sigctxt) r5() uint64

r5 method #

func (c *sigctxt) r5() uint64

r5 method #

func (c *sigctxt) r5() uint64

r6 method #

func (c *sigctxt) r6() uint64

r6 method #

func (c *sigctxt) r6() uint32

r6 method #

func (c *sigctxt) r6() uint64

r6 method #

func (c *sigctxt) r6() uint64

r6 method #

func (c *sigctxt) r6() uint64

r6 method #

func (c *sigctxt) r6() uint32

r6 method #

func (c *sigctxt) r6() uint64

r6 method #

func (c *sigctxt) r6() uint64

r6 method #

func (c *sigctxt) r6() uint64

r6 method #

func (c *sigctxt) r6() uint32

r6 method #

func (c *sigctxt) r6() uint64

r6 method #

func (c *sigctxt) r6() uint32

r6 method #

func (c *sigctxt) r6() uint64

r6 method #

func (c *sigctxt) r6() uint64

r6 method #

func (c *sigctxt) r6() uint64

r6 method #

func (c *sigctxt) r6() uint32

r6 method #

func (c *sigctxt) r6() uint64

r7 method #

func (c *sigctxt) r7() uint64

r7 method #

func (c *sigctxt) r7() uint32

r7 method #

func (c *sigctxt) r7() uint32

r7 method #

func (c *sigctxt) r7() uint64

r7 method #

func (c *sigctxt) r7() uint64

r7 method #

func (c *sigctxt) r7() uint64

r7 method #

func (c *sigctxt) r7() uint32

r7 method #

func (c *sigctxt) r7() uint64

r7 method #

func (c *sigctxt) r7() uint64

r7 method #

func (c *sigctxt) r7() uint64

r7 method #

func (c *sigctxt) r7() uint32

r7 method #

func (c *sigctxt) r7() uint64

r7 method #

func (c *sigctxt) r7() uint64

r7 method #

func (c *sigctxt) r7() uint64

r7 method #

func (c *sigctxt) r7() uint64

r7 method #

func (c *sigctxt) r7() uint32

r7 method #

func (c *sigctxt) r7() uint64

r8 function #

func r8(p unsafe.Pointer) uintptr

r8 method #

func (c *sigctxt) r8() uint32

r8 method #

func (c *sigctxt) r8() uint64

r8 method #

func (c *sigctxt) r8() uint32

r8 method #

func (c *sigctxt) r8() uint64

r8 method #

func (c *sigctxt) r8() uint64

r8 method #

func (c *sigctxt) r8() uint64

r8 method #

func (c *sigctxt) r8() uint64

r8 method #

func (c *sigctxt) r8() uint64

r8 method #

func (c *sigctxt) r8() uint64

r8 method #

func (c *sigctxt) r8() uint64

r8 method #

func (c *sigctxt) r8() uint64

r8 method #

func (c *sigctxt) r8() uint64

r8 method #

func (c *sigctxt) r8() uint64

r8 method #

func (c *sigctxt) r8() uint64

r8 method #

func (c *sigctxt) r8() uint64

r8 method #

func (c *sigctxt) r8() uint64

r8 method #

func (c *sigctxt) r8() uint32

r8 method #

func (c *sigctxt) r8() uint64

r8 method #

func (c *sigctxt) r8() uint64

r8 method #

func (c *sigctxt) r8() uint64

r8 method #

func (c *sigctxt) r8() uint64

r8 method #

func (c *sigctxt) r8() uint64

r8 method #

func (c *sigctxt) r8() uint32

r8 method #

func (c *sigctxt) r8() uint32

r9 method #

func (c *sigctxt) r9() uint64

r9 method #

func (c *sigctxt) r9() uint64

r9 method #

func (c *sigctxt) r9() uint64

r9 method #

func (c *sigctxt) r9() uint32

r9 method #

func (c *sigctxt) r9() uint64

r9 method #

func (c *sigctxt) r9() uint32

r9 method #

func (c *sigctxt) r9() uint64

r9 method #

func (c *sigctxt) r9() uint32

r9 method #

func (c *sigctxt) r9() uint64

r9 method #

func (c *sigctxt) r9() uint64

r9 method #

func (c *sigctxt) r9() uint64

r9 method #

func (c *sigctxt) r9() uint64

r9 method #

func (c *sigctxt) r9() uint64

r9 method #

func (c *sigctxt) r9() uint64

r9 method #

func (c *sigctxt) r9() uint64

r9 method #

func (c *sigctxt) r9() uint64

r9 method #

func (c *sigctxt) r9() uint64

r9 method #

func (c *sigctxt) r9() uint64

r9 method #

func (c *sigctxt) r9() uint64

r9 method #

func (c *sigctxt) r9() uint32

r9 method #

func (c *sigctxt) r9() uint64

r9 method #

func (c *sigctxt) r9() uint32

r9 method #

func (c *sigctxt) r9() uint64

r9 method #

func (c *sigctxt) r9() uint64

ra method #

func (c *sigctxt) ra() uint64

ra method #

func (c *sigctxt) ra() uint64

ra method #

func (c *sigctxt) ra() uint64

raceReadObjectPC function #

For all functions accepting callerpc and pc, callerpc is a return PC of the function that calls this function, pc is start PC of the function that calls this function.

func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc uintptr, pc uintptr)

raceReadObjectPC function #

func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc uintptr, pc uintptr)

raceSymbolizeCode function #

raceSymbolizeCode reads ctx.pc and populates the rest of *ctx with information about the code at that pc. The race detector has already subtracted 1 from pcs, so they point to the last byte of call instructions (including calls to runtime.racewrite and friends). If the incoming pc is part of an inlined function, *ctx is populated with information about the inlined function, and on return ctx.pc is set to a pc in the logically containing function. (The race detector should call this function again with that pc.) If the incoming pc is not part of an inlined function, the return pc is unchanged.

func raceSymbolizeCode(ctx *symbolizeCodeContext)

raceSymbolizeData function #

func raceSymbolizeData(ctx *symbolizeDataContext)

raceWriteObjectPC function #

func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc uintptr, pc uintptr)

raceWriteObjectPC function #

func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc uintptr, pc uintptr)

race_Acquire function #

go:linkname race_Acquire internal/race.Acquire go:nosplit

func race_Acquire(addr unsafe.Pointer)

race_Disable function #

go:linkname race_Disable internal/race.Disable go:nosplit

func race_Disable()

race_Enable function #

go:linkname race_Enable internal/race.Enable go:nosplit

func race_Enable()

race_Errors function #

go:linkname race_Errors internal/race.Errors go:nosplit

func race_Errors() int

race_Read function #

go:linkname race_Read internal/race.Read go:nosplit

func race_Read(addr unsafe.Pointer)

race_ReadObjectPC function #

go:linkname race_ReadObjectPC internal/race.ReadObjectPC

func race_ReadObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc uintptr, pc uintptr)

race_ReadPC function #

go:linkname race_ReadPC internal/race.ReadPC

func race_ReadPC(addr unsafe.Pointer, callerpc uintptr, pc uintptr)

race_ReadRange function #

go:linkname race_ReadRange internal/race.ReadRange go:nosplit

func race_ReadRange(addr unsafe.Pointer, len int)

race_Release function #

go:linkname race_Release internal/race.Release go:nosplit

func race_Release(addr unsafe.Pointer)

race_ReleaseMerge function #

go:linkname race_ReleaseMerge internal/race.ReleaseMerge go:nosplit

func race_ReleaseMerge(addr unsafe.Pointer)

race_Write function #

go:linkname race_Write internal/race.Write go:nosplit

func race_Write(addr unsafe.Pointer)

race_WriteObjectPC function #

go:linkname race_WriteObjectPC internal/race.WriteObjectPC

func race_WriteObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc uintptr, pc uintptr)

race_WritePC function #

go:linkname race_WritePC internal/race.WritePC

func race_WritePC(addr unsafe.Pointer, callerpc uintptr, pc uintptr)

race_WriteRange function #

go:linkname race_WriteRange internal/race.WriteRange go:nosplit

func race_WriteRange(addr unsafe.Pointer, len int)

raceacquire function #

func raceacquire(addr unsafe.Pointer)

raceacquire function #

go:nosplit

func raceacquire(addr unsafe.Pointer)

raceacquirectx function #

func raceacquirectx(racectx uintptr, addr unsafe.Pointer)

raceacquirectx function #

go:nosplit

func raceacquirectx(racectx uintptr, addr unsafe.Pointer)

raceacquireg function #

go:nosplit

func raceacquireg(gp *g, addr unsafe.Pointer)

raceacquireg function #

func raceacquireg(gp *g, addr unsafe.Pointer)

raceaddr method #

func (sg *synctestGroup) raceaddr() unsafe.Pointer

raceaddr method #

func (c *hchan) raceaddr() unsafe.Pointer

racecall function #

racecall allows calling an arbitrary function fn from C race runtime with up to 4 uintptr arguments.

func racecall(fn *byte, arg0 uintptr, arg1 uintptr, arg2 uintptr, arg3 uintptr)

racecallback function #

Callback from C into Go, runs on g0.

func racecallback(cmd uintptr, ctx unsafe.Pointer)

racecallbackthunk function #

func racecallbackthunk(uintptr)

racectxend function #

go:nosplit

func racectxend(racectx uintptr)

racectxend function #

func racectxend(racectx uintptr)

racefingo function #

func racefingo()

racefingo function #

go:nosplit

func racefingo()

racefini function #

go:nosplit

func racefini()

racefini function #

func racefini()

racefree function #

func racefree(p unsafe.Pointer, sz uintptr)

racefree function #

go:nosplit

func racefree(p unsafe.Pointer, sz uintptr)

racefuncenter function #

func racefuncenter(callpc uintptr)

racefuncenterfp function #

func racefuncenterfp(fp uintptr)

racefuncexit function #

func racefuncexit()

racegoend function #

go:nosplit

func racegoend()

racegoend function #

func racegoend()

racegostart function #

go:nosplit

func racegostart(pc uintptr) uintptr

racegostart function #

func racegostart(pc uintptr) uintptr

raceinit function #

go:nosplit

func raceinit() (gctx uintptr, pctx uintptr)

raceinit function #

func raceinit() (uintptr, uintptr)

racemalloc function #

go:nosplit

func racemalloc(p unsafe.Pointer, sz uintptr)

racemalloc function #

func racemalloc(p unsafe.Pointer, sz uintptr)

racemapshadow function #

go:nosplit

func racemapshadow(addr unsafe.Pointer, size uintptr)

racemapshadow function #

func racemapshadow(addr unsafe.Pointer, size uintptr)

racenotify function #

Notify the race detector of a send or receive involving buffer entry idx and a channel c or its communicating partner sg. This function handles the special case of c.elemsize==0.

func racenotify(c *hchan, idx uint, sg *sudog)

raceproccreate function #

go:nosplit

func raceproccreate() uintptr

raceproccreate function #

func raceproccreate() uintptr

raceprocdestroy function #

go:nosplit

func raceprocdestroy(ctx uintptr)

raceprocdestroy function #

func raceprocdestroy(ctx uintptr)

raceread function #

func raceread(addr uintptr)

racereadpc function #

go:noescape

func racereadpc(addr unsafe.Pointer, callpc uintptr, pc uintptr)

racereadpc function #

func racereadpc(addr unsafe.Pointer, callerpc uintptr, pc uintptr)

racereadrange function #

func racereadrange(addr uintptr, size uintptr)

racereadrangepc function #

func racereadrangepc(addr unsafe.Pointer, sz uintptr, callerpc uintptr, pc uintptr)

racereadrangepc function #

go:nosplit

func racereadrangepc(addr unsafe.Pointer, sz uintptr, callpc uintptr, pc uintptr)

racereadrangepc1 function #

func racereadrangepc1(addr uintptr, size uintptr, pc uintptr)

racerelease function #

go:nosplit

func racerelease(addr unsafe.Pointer)

racerelease function #

func racerelease(addr unsafe.Pointer)

racereleaseacquire function #

go:nosplit

func racereleaseacquire(addr unsafe.Pointer)

racereleaseacquire function #

func racereleaseacquire(addr unsafe.Pointer)

racereleaseacquireg function #

func racereleaseacquireg(gp *g, addr unsafe.Pointer)

racereleaseacquireg function #

go:nosplit

func racereleaseacquireg(gp *g, addr unsafe.Pointer)

racereleaseg function #

go:nosplit

func racereleaseg(gp *g, addr unsafe.Pointer)

racereleaseg function #

func racereleaseg(gp *g, addr unsafe.Pointer)

racereleasemerge function #

go:nosplit

func racereleasemerge(addr unsafe.Pointer)

racereleasemerge function #

func racereleasemerge(addr unsafe.Pointer)

racereleasemergeg function #

func racereleasemergeg(gp *g, addr unsafe.Pointer)

racereleasemergeg function #

go:nosplit

func racereleasemergeg(gp *g, addr unsafe.Pointer)

racesync function #

func racesync(c *hchan, sg *sudog)

racewrite function #

func racewrite(addr uintptr)

racewritepc function #

func racewritepc(addr unsafe.Pointer, callerpc uintptr, pc uintptr)

racewritepc function #

go:noescape

func racewritepc(addr unsafe.Pointer, callpc uintptr, pc uintptr)

racewriterange function #

func racewriterange(addr uintptr, size uintptr)

racewriterangepc function #

func racewriterangepc(addr unsafe.Pointer, sz uintptr, callerpc uintptr, pc uintptr)

racewriterangepc function #

go:nosplit

func racewriterangepc(addr unsafe.Pointer, sz uintptr, callpc uintptr, pc uintptr)

racewriterangepc1 function #

func racewriterangepc1(addr uintptr, size uintptr, pc uintptr)

raise function #

go:nosplit

func raise(sig uint32)

raise function #

raise sends a signal to the calling thread. It must be nosplit because it is used by the signal handler before it definitely has a Go stack. go:nosplit

func raise(sig uint32)

raise function #

go:nosplit

func raise(sig uint32)

raise function #

func raise(sig uint32)

raise function #

go:nosplit go:cgo_unsafe_args

func raise(sig uint32)

raise function #

raise sends a signal to the calling thread. It must be nosplit because it is used by the signal handler before it definitely has a Go stack. go:nosplit

func raise(sig uint32)

raise function #

raise sends a signal to the calling thread. It must be nosplit because it is used by the signal handler before it definitely has a Go stack. go:nosplit

func raise(sig uint32)

raise function #

go:nosplit go:nowritebarrierrec

func raise(sig uint32)

raise_trampoline function #

func raise_trampoline()

raisebadsignal function #

func raisebadsignal(sig uint32)

raisebadsignal function #

raisebadsignal is called when a signal is received on a non-Go thread, and the Go program does not want to handle it (that is, the program has not called os/signal.Notify for the signal).

func raisebadsignal(sig uint32, c *sigctxt)

raiseproc function #

go:nosplit

func raiseproc(sig uint32)

raiseproc function #

func raiseproc(sig uint32)

raiseproc function #

func raiseproc(sig uint32)

raiseproc function #

go:nosplit go:cgo_unsafe_args

func raiseproc(sig uint32)

raiseproc function #

func raiseproc(sig uint32)

raiseproc function #

go:nosplit go:cgo_unsafe_args

func raiseproc(sig uint32)

raiseproc function #

func raiseproc(sig uint32)

raiseproc function #

func raiseproc(sig uint32)

raiseproc function #

func raiseproc(sig uint32)

raiseproc_trampoline function #

func raiseproc_trampoline()

raiseproc_trampoline function #

func raiseproc_trampoline()

rand function #

rand returns a random uint64 from the per-m chacha8 state. This is called from compiler-generated code. Do not change signature: used via linkname from other packages. go:nosplit go:linkname rand

func rand() uint64

rand32 function #

rand32 is uint32(rand()), called from compiler-generated code. go:nosplit

func rand32() uint32

rand_fatal function #

go:linkname rand_fatal crypto/rand.fatal

func rand_fatal(s string)

randinit function #

randinit initializes the global random state. It must be called before any use of grand.

func randinit()

randn function #

randn is like rand() % n but faster. Do not change signature: used via linkname from other packages. go:nosplit go:linkname randn

func randn(n uint32) uint32

random_get function #

go:wasmimport wasi_snapshot_preview1 random_get go:noescape

func random_get(buf *byte, bufLen size) errno

raw method #

func (f *Func) raw() *_func

rawbyteslice function #

rawbyteslice allocates a new byte slice. The byte slice is not zeroed.

func rawbyteslice(size int) (b []byte)

rawruneslice function #

rawruneslice allocates a new rune slice. The rune slice is not zeroed.

func rawruneslice(size int) (b []rune)

rawstring function #

rawstring allocates storage for a new string. The returned string and byte slice both refer to the same storage. The storage is not zeroed. Callers should use b to set the string contents and then drop b.

func rawstring(size int) (s string, b []byte)

rawstringtmp function #

func rawstringtmp(buf *tmpBuf, l int) (s string, b []byte)

rax method #

func (c *sigctxt) rax() uint64

rax method #

func (c *sigctxt) rax() uint64

rax method #

func (c *sigctxt) rax() uint64

rax method #

func (c *sigctxt) rax() uint64

rax method #

func (c *sigctxt) rax() uint64

rax method #

func (c *sigctxt) rax() uint64

rax method #

func (c *sigctxt) rax() uint64

rbp method #

func (c *sigctxt) rbp() uint64

rbp method #

func (c *sigctxt) rbp() uint64

rbp method #

func (c *sigctxt) rbp() uint64

rbp method #

func (c *sigctxt) rbp() uint64

rbp method #

func (c *sigctxt) rbp() uint64

rbp method #

func (c *sigctxt) rbp() uint64

rbp method #

func (c *sigctxt) rbp() uint64

rbx method #

func (c *sigctxt) rbx() uint64

rbx method #

func (c *sigctxt) rbx() uint64

rbx method #

func (c *sigctxt) rbx() uint64

rbx method #

func (c *sigctxt) rbx() uint64

rbx method #

func (c *sigctxt) rbx() uint64

rbx method #

func (c *sigctxt) rbx() uint64

rbx method #

func (c *sigctxt) rbx() uint64

rctlblk_get_local_action function #

go:nosplit

func rctlblk_get_local_action(buf unsafe.Pointer) uintptr

rctlblk_get_local_flags function #

go:nosplit

func rctlblk_get_local_flags(buf unsafe.Pointer) uintptr

rctlblk_get_value function #

go:nosplit

func rctlblk_get_value(buf unsafe.Pointer) uint64

rctlblk_size function #

go:nosplit

func rctlblk_size() uintptr

rcx method #

func (c *sigctxt) rcx() uint64

rcx method #

func (c *sigctxt) rcx() uint64

rcx method #

func (c *sigctxt) rcx() uint64

rcx method #

func (c *sigctxt) rcx() uint64

rcx method #

func (c *sigctxt) rcx() uint64

rcx method #

func (c *sigctxt) rcx() uint64

rcx method #

func (c *sigctxt) rcx() uint64

rdi method #

func (c *sigctxt) rdi() uint64

rdi method #

func (c *sigctxt) rdi() uint64

rdi method #

func (c *sigctxt) rdi() uint64

rdi method #

func (c *sigctxt) rdi() uint64

rdi method #

func (c *sigctxt) rdi() uint64

rdi method #

func (c *sigctxt) rdi() uint64

rdi method #

func (c *sigctxt) rdi() uint64

rdx method #

func (c *sigctxt) rdx() uint64

rdx method #

func (c *sigctxt) rdx() uint64

rdx method #

func (c *sigctxt) rdx() uint64

rdx method #

func (c *sigctxt) rdx() uint64

rdx method #

func (c *sigctxt) rdx() uint64

rdx method #

func (c *sigctxt) rdx() uint64

rdx method #

func (c *sigctxt) rdx() uint64

read method #

read returns the current cycle count.

func (c *mProfCycleHolder) read() (cycle uint32)

read function #

read calls the read system call. It returns a non-negative number of bytes read or a negative errno value.

func read(fd int32, p unsafe.Pointer, n int32) int32

read function #

read calls the read system call. It returns a non-negative number of bytes read or a negative errno value.

func read(fd int32, p unsafe.Pointer, n int32) int32

read function #

func read(fd int32, p unsafe.Pointer, n int32) int32

read function #

go:nosplit

func read(fd int32, p unsafe.Pointer, n int32) int32

read function #

go:nosplit

func read(fd int32, buf unsafe.Pointer, nbyte int32) int32

read function #

func read(fd int32, p unsafe.Pointer, n int32) int32

read method #

func (b *profBuf) read(mode profBufReadMode) (data []uint64, tags []unsafe.Pointer, eof bool)

read method #

read returns true if P id's bit is set.

func (p pMask) read(id uint32) bool

read function #

go:nosplit go:cgo_unsafe_args

func read(fd int32, p unsafe.Pointer, n int32) int32

read function #

go:nosplit go:cgo_unsafe_args

func read(fd int32, p unsafe.Pointer, n int32) int32

read function #

go:nosplit

func read(fd int32, buf unsafe.Pointer, n int32) int32

read method #

read takes a globally consistent snapshot of m and puts the aggregated value in out. Even though out is a heapStatsDelta, the resulting values should be complete and valid statistic values. Not safe to call concurrently. The world must be stopped or metricsSema must be held.

func (m *consistentHeapStats) read(out *heapStatsDelta)

readGCStats function #

go:linkname readGCStats runtime/debug.readGCStats

func readGCStats(pauses *[]uint64)

readGCStats_m function #

readGCStats_m must be called on the system stack because it acquires the heap lock. See mheap for details. go:systemstack

func readGCStats_m(pauses *[]uint64)

readGOGC function #

func readGOGC() int32

readGOMEMLIMIT function #

func readGOMEMLIMIT() int64

readMetricNames function #

readMetricNames is the implementation of runtime/metrics.readMetricNames, used by the runtime/metrics test and otherwise unreferenced. go:linkname readMetricNames runtime/metrics_test.runtime_readMetricNames

func readMetricNames() []string

readMetrics function #

readMetrics is the implementation of runtime/metrics.Read. go:linkname readMetrics runtime/metrics.runtime_readMetrics

func readMetrics(samplesp unsafe.Pointer, len int, cap int)

readMetricsLocked function #

readMetricsLocked is the internal, locked portion of readMetrics. Broken out for more robust testing. metricsLock must be held and initMetrics must have been called already.

func readMetricsLocked(samplesp unsafe.Pointer, len int, cap int)

readRandom function #

go:nosplit

func readRandom(r []byte) int

readRandom function #

func readRandom(r []byte) int

readRandom function #

go:nosplit

func readRandom(r []byte) int

readRandom function #

go:nosplit

func readRandom(r []byte) int

readRandom function #

go:nosplit

func readRandom(r []byte) int

readRandom function #

go:nosplit

func readRandom(r []byte) int

readRandom function #

func readRandom(r []byte) int

readRandom function #

go:nosplit

func readRandom(r []byte) int

readRandom function #

go:nosplit

func readRandom(r []byte) int

readRandom function #

func readRandom(r []byte) int

readRandom function #

go:nosplit

func readRandom(r []byte) int

readRandom function #

go:nosplit

func readRandom(r []byte) int

readTimeRandom function #

readTimeRandom stretches any entropy in the current time into entropy the length of r and XORs it into r. This is a fallback for when readRandom does not read the full requested amount. Whatever entropy r already contained is preserved.

func readTimeRandom(r []byte)

readTrace0 function #

readTrace0 is ReadTrace's continuation on g0. This must run on the system stack because it acquires trace.lock. go:systemstack

func readTrace0() (buf []byte, park bool)

readUint16LEAt method #

go:nosplit

func (r *debugLogReader) readUint16LEAt(pos uint64) uint16

readUint64LEAt method #

go:nosplit

func (r *debugLogReader) readUint64LEAt(pos uint64) uint64

readUintptr function #

Read the bytes starting at the aligned pointer p into a uintptr. Read is little-endian.

func readUintptr(p *byte) uintptr

readUnaligned32 function #

Note: These routines perform the read with a native endianness.

func readUnaligned32(p unsafe.Pointer) uint32

readUnaligned64 function #

func readUnaligned64(p unsafe.Pointer) uint64

read_tls_fallback function #

func read_tls_fallback()

read_trampoline function #

func read_trampoline()

read_trampoline function #

func read_trampoline()

readgstatus function #

All reads and writes of g's status go through readgstatus, casgstatus, castogscanstatus, casfrom_Gscanstatus. go:nosplit

func readgstatus(gp *g) uint32

readmemstats_m function #

readmemstats_m populates stats for internal runtime values. The world must be stopped.

func readmemstats_m(stats *MemStats)

readvarint function #

readvarint reads a varint from p.

func readvarint(p []byte) (read uint32, val uint32)

readvarintUnsafe function #

readvarintUnsafe reads the uint32 in varint format starting at fd, and returns the uint32 and a pointer to the byte following the varint. The implementation is the same with runtime.readvarint, except that this function uses unsafe.Pointer for speed.

func readvarintUnsafe(fd unsafe.Pointer) (uint32, unsafe.Pointer)

ready function #

Mark gp ready to run.

func ready(gp *g, traceskip int, next bool)

ready method #

ready signals to sysmon that the scavenger should be awoken.

func (s *scavengerState) ready()

readyNextGen method #

readyNextGen readies r for the generation following gen.

func (r *traceSchedResourceState) readyNextGen(gen uintptr)

readyWithTime function #

func readyWithTime(s *sudog, traceskip int)

reclaim method #

reclaim sweeps and reclaims at least npage pages into the heap. It is called before allocating npage pages to keep growth in check. reclaim implements the page-reclaimer half of the sweeper. h.lock must NOT be held.

func (h *mheap) reclaim(npage uintptr)

reclaimChunk method #

reclaimChunk sweeps unmarked spans that start at page indexes [pageIdx, pageIdx+n). It returns the number of pages returned to the heap. h.lock must be held and the caller must be non-preemptible. Note: h.lock may be temporarily unlocked and re-locked in order to do sweeping or if tracing is enabled.

func (h *mheap) reclaimChunk(arenas []arenaIdx, pageIdx uintptr, n uintptr) uintptr

record method #

record adds the given duration to the distribution. Disallow preemptions and stack growths because this function may run in sensitive locations. go:nosplit

func (h *timeHistogram) record(duration int64)

recordForPanic function #

recordForPanic maintains a circular buffer of messages written by the runtime leading up to a process crash, allowing the messages to be extracted from a core dump. The text written during a process crash (following "panic" or "fatal error") is not saved, since the goroutine stacks will generally be readable from the runtime data structures in the core file.

func recordForPanic(b []byte)

recordLock method #

func (prof *mLockProfile) recordLock(cycles int64, l *mutex)

recordUnlock method #

From unlock2, we might not be holding a p in this code. go:nowritebarrierrec

func (prof *mLockProfile) recordUnlock(l *mutex)

recordspan function #

recordspan adds a newly allocated span to h.allspans. This only happens the first time a span is allocated from mheap.spanalloc (it is not called when a span is reused). Write barriers are disallowed here because it can be called from gcWork when allocating new workbufs. However, because it's an indirect call from the fixalloc initializer, the compiler can't see this. The heap lock must be held. go:nowritebarrierrec

func recordspan(vh unsafe.Pointer, p unsafe.Pointer)

recovery function #

Unwind the stack after a deferred function calls recover after a panic. Then arrange to continue running as though the caller of the deferred function returned normally. However, if unwinding the stack would skip over a Goexit call, we return into the Goexit loop instead, so it can continue processing defers instead.

func recovery(gp *g)

recv function #

recv processes a receive operation on a full channel c. There are 2 parts: 1. The value sent by the sender sg is put into the channel and the sender is woken up to go on its merry way. 2. The value received by the receiver (the current G) is written to ep. For synchronous channels, both values are the same. For asynchronous channels, the receiver gets its data from the channel buffer and the sender's data is put in the channel buffer. Channel c must be full and locked. recv unlocks c with unlockf. sg must already be dequeued from c. A non-nil ep must point to the heap or the caller's stack.

func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int)

recvDirect function #

func recvDirect(t *_type, sg *sudog, dst unsafe.Pointer)

redZoneSize function #

redZoneSize computes the size of the redzone for a given allocation. Refer to the implementation of the compiler-rt.

func redZoneSize(userSize uintptr) uintptr

reentersyscall function #

The goroutine g is about to enter a system call. Record that it's not using the cpu anymore. This is called only from the go syscall library and cgocall, not from the low-level system calls used by the runtime. Entersyscall cannot split the stack: the save must make g->sched refer to the caller's stack segment, because entersyscall is going to return immediately after. Nothing entersyscall calls can split the stack either. We cannot safely move the stack during an active call to syscall, because we do not know which of the uintptr arguments are really pointers (back into the stack). In practice, this means that we make the fast path run through entersyscall doing no-split things, and the slow path has to use systemstack to run bigger things on the system stack. reentersyscall is the entry point used by cgo callbacks, where explicitly saved SP and PC are restored. This is needed when exitsyscall will be called from a function further up in the call stack than the parent, as g->syscallsp must always point to a valid stack frame. entersyscall below is the normal entry point for syscalls, which obtains the SP and PC from the caller. go:nosplit

func reentersyscall(pc uintptr, sp uintptr, bp uintptr)

refill method #

refill acquires a new span of span class spc for c. This span will have at least one free object. The current span in c must be full. Must run in a non-preemptible context since otherwise the owner of c could change.

func (c *mcache) refill(spc spanClass)

refill method #

refill inserts the current arena chunk onto the full list and obtains a new one, either from the partial list or allocating a new one, both from mheap.

func (a *userArena) refill() *mspan

refill method #

refill puts w.traceBuf on the queue of full buffers and refreshes w's buffer.

func (w traceWriter) refill() traceWriter

refillAllocCache method #

refillAllocCache takes 8 bytes of s.allocBits starting at whichByte and negates them so that ctz (count trailing zeros) instructions can be used. It then places these 8 bytes into the cached 64 bit s.allocCache.

func (s *mspan) refillAllocCache(whichByte uint16)

reflectOffsLock function #

func reflectOffsLock()

reflectOffsUnlock function #

func reflectOffsUnlock()

reflect_addReflectOff function #

reflect_addReflectOff adds a pointer to the reflection offset lookup map. go:linkname reflect_addReflectOff reflect.addReflectOff

func reflect_addReflectOff(ptr unsafe.Pointer) int32

reflect_chancap function #

go:linkname reflect_chancap reflect.chancap

func reflect_chancap(c *hchan) int

reflect_chanclose function #

go:linkname reflect_chanclose reflect.chanclose

func reflect_chanclose(c *hchan)

reflect_chanlen function #

go:linkname reflect_chanlen reflect.chanlen

func reflect_chanlen(c *hchan) int

reflect_chanrecv function #

go:linkname reflect_chanrecv reflect.chanrecv

func reflect_chanrecv(c *hchan, nb bool, elem unsafe.Pointer) (selected bool, received bool)

reflect_chansend function #

go:linkname reflect_chansend reflect.chansend0

func reflect_chansend(c *hchan, elem unsafe.Pointer, nb bool) (selected bool)

reflect_gcbits function #

reflect_gcbits returns the GC type info for x, for testing. The result is the bitmap entries (0 or 1), one entry per byte. go:linkname reflect_gcbits reflect.gcbits

func reflect_gcbits(x any) []byte

reflect_growslice function #

reflect_growslice should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/cloudwego/dynamicgo Do not remove or change the type signature. See go.dev/issue/67401. go:linkname reflect_growslice reflect.growslice

func reflect_growslice(et *_type, old slice, num int) slice

reflect_ifaceE2I function #

reflect_ifaceE2I is for package reflect, but widely used packages access it using linkname. Notable members of the hall of shame include: - gitee.com/quant1x/gox - github.com/modern-go/reflect2 - github.com/v2pro/plz Do not remove or change the type signature. go:linkname reflect_ifaceE2I reflect.ifaceE2I

func reflect_ifaceE2I(inter *interfacetype, e eface, dst *iface)

reflect_makechan function #

go:linkname reflect_makechan reflect.makechan

func reflect_makechan(t *chantype, size int) *hchan

reflect_makemap function #

reflect_makemap is for package reflect, but widely used packages access it using linkname. Notable members of the hall of shame include: - gitee.com/quant1x/gox - github.com/modern-go/reflect2 - github.com/goccy/go-json - github.com/RomiChan/protobuf - github.com/segmentio/encoding - github.com/v2pro/plz Do not remove or change the type signature. See go.dev/issue/67401. go:linkname reflect_makemap reflect.makemap

func reflect_makemap(t *maptype, cap int) *hmap

reflect_makemap function #

reflect_makemap is for package reflect, but widely used packages access it using linkname. Notable members of the hall of shame include: - gitee.com/quant1x/gox - github.com/modern-go/reflect2 - github.com/goccy/go-json - github.com/RomiChan/protobuf - github.com/segmentio/encoding - github.com/v2pro/plz Do not remove or change the type signature. See go.dev/issue/67401. go:linkname reflect_makemap reflect.makemap

func reflect_makemap(t *abi.SwissMapType, cap int) *maps.Map

reflect_mapaccess function #

reflect_mapaccess is for package reflect, but widely used packages access it using linkname. Notable members of the hall of shame include: - gitee.com/quant1x/gox - github.com/modern-go/reflect2 - github.com/v2pro/plz Do not remove or change the type signature. See go.dev/issue/67401. go:linkname reflect_mapaccess reflect.mapaccess

func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer

reflect_mapaccess function #

reflect_mapaccess is for package reflect, but widely used packages access it using linkname. Notable members of the hall of shame include: - gitee.com/quant1x/gox - github.com/modern-go/reflect2 - github.com/v2pro/plz Do not remove or change the type signature. See go.dev/issue/67401. go:linkname reflect_mapaccess reflect.mapaccess

func reflect_mapaccess(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer

reflect_mapaccess_faststr function #

go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr

func reflect_mapaccess_faststr(t *abi.SwissMapType, m *maps.Map, key string) unsafe.Pointer

reflect_mapaccess_faststr function #

go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr

func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer

reflect_mapassign function #

reflect_mapassign is for package reflect, but widely used packages access it using linkname. Notable members of the hall of shame include: - gitee.com/quant1x/gox - github.com/v2pro/plz Do not remove or change the type signature. go:linkname reflect_mapassign reflect.mapassign0

func reflect_mapassign(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer, elem unsafe.Pointer)

reflect_mapassign function #

reflect_mapassign is for package reflect, but widely used packages access it using linkname. Notable members of the hall of shame include: - gitee.com/quant1x/gox - github.com/v2pro/plz Do not remove or change the type signature. go:linkname reflect_mapassign reflect.mapassign0

func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer)

reflect_mapassign_faststr function #

go:linkname reflect_mapassign_faststr reflect.mapassign_faststr0

func reflect_mapassign_faststr(t *maptype, h *hmap, key string, elem unsafe.Pointer)

reflect_mapassign_faststr function #

go:linkname reflect_mapassign_faststr reflect.mapassign_faststr0

func reflect_mapassign_faststr(t *abi.SwissMapType, m *maps.Map, key string, elem unsafe.Pointer)

reflect_mapclear function #

go:linkname reflect_mapclear reflect.mapclear

func reflect_mapclear(t *abi.SwissMapType, m *maps.Map)

reflect_mapclear function #

go:linkname reflect_mapclear reflect.mapclear

func reflect_mapclear(t *maptype, h *hmap)

reflect_mapdelete function #

go:linkname reflect_mapdelete reflect.mapdelete

func reflect_mapdelete(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer)

reflect_mapdelete function #

go:linkname reflect_mapdelete reflect.mapdelete

func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer)

reflect_mapdelete_faststr function #

go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr

func reflect_mapdelete_faststr(t *abi.SwissMapType, m *maps.Map, key string)

reflect_mapdelete_faststr function #

go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr

func reflect_mapdelete_faststr(t *maptype, h *hmap, key string)

reflect_mapiterelem function #

reflect_mapiterelem is a compatibility wrapper for map iterator for users of //go:linkname from before Go 1.24. It is not used by Go itself. New users should use reflect or the maps package. reflect_mapiterelem should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/goccy/go-json - gonum.org/v1/gonum Do not remove or change the type signature. See go.dev/issue/67401. go:linkname reflect_mapiterelem reflect.mapiterelem

func reflect_mapiterelem(it *linknameIter) unsafe.Pointer

reflect_mapiterelem function #

reflect_mapiterelem was for package reflect, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/goccy/go-json - gonum.org/v1/gonum Do not remove or change the type signature. See go.dev/issue/67401. go:linkname reflect_mapiterelem reflect.mapiterelem

func reflect_mapiterelem(it *hiter) unsafe.Pointer

reflect_mapiterinit function #

reflect_mapiterinit is for package reflect, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/modern-go/reflect2 - gitee.com/quant1x/gox - github.com/v2pro/plz - github.com/wI2L/jettison Do not remove or change the type signature. See go.dev/issue/67401. go:linkname reflect_mapiterinit reflect.mapiterinit

func reflect_mapiterinit(t *maptype, h *hmap, it *hiter)

reflect_mapiterinit function #

reflect_mapiterinit is a compatibility wrapper for map iterator for users of //go:linkname from before Go 1.24. It is not used by Go itself. New users should use reflect or the maps package. reflect_mapiterinit should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/modern-go/reflect2 - gitee.com/quant1x/gox - github.com/v2pro/plz - github.com/wI2L/jettison Do not remove or change the type signature. See go.dev/issue/67401. go:linkname reflect_mapiterinit reflect.mapiterinit

func reflect_mapiterinit(t *abi.SwissMapType, m *maps.Map, it *linknameIter)

reflect_mapiterkey function #

reflect_mapiterkey was for package reflect, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/goccy/go-json - gonum.org/v1/gonum Do not remove or change the type signature. See go.dev/issue/67401. go:linkname reflect_mapiterkey reflect.mapiterkey

func reflect_mapiterkey(it *hiter) unsafe.Pointer

reflect_mapiterkey function #

reflect_mapiterkey is a compatibility wrapper for map iterator for users of //go:linkname from before Go 1.24. It is not used by Go itself. New users should use reflect or the maps package. reflect_mapiterkey should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/goccy/go-json - gonum.org/v1/gonum Do not remove or change the type signature. See go.dev/issue/67401. go:linkname reflect_mapiterkey reflect.mapiterkey

func reflect_mapiterkey(it *linknameIter) unsafe.Pointer

reflect_mapiternext function #

reflect_mapiternext is for package reflect, but widely used packages access it using linkname. Notable members of the hall of shame include: - gitee.com/quant1x/gox - github.com/modern-go/reflect2 - github.com/goccy/go-json - github.com/v2pro/plz - github.com/wI2L/jettison Do not remove or change the type signature. See go.dev/issue/67401. go:linkname reflect_mapiternext reflect.mapiternext

func reflect_mapiternext(it *hiter)

reflect_mapiternext function #

reflect_mapiternext is a compatibility wrapper for map iterator for users of //go:linkname from before Go 1.24. It is not used by Go itself. New users should use reflect or the maps package. reflect_mapiternext is for package reflect, but widely used packages access it using linkname. Notable members of the hall of shame include: - gitee.com/quant1x/gox - github.com/modern-go/reflect2 - github.com/goccy/go-json - github.com/v2pro/plz - github.com/wI2L/jettison Do not remove or change the type signature. See go.dev/issue/67401. go:linkname reflect_mapiternext reflect.mapiternext

func reflect_mapiternext(it *linknameIter)

reflect_maplen function #

reflect_maplen is for package reflect, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/goccy/go-json - github.com/wI2L/jettison Do not remove or change the type signature. See go.dev/issue/67401. go:linkname reflect_maplen reflect.maplen

func reflect_maplen(m *maps.Map) int

reflect_maplen function #

reflect_maplen is for package reflect, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/goccy/go-json - github.com/wI2L/jettison Do not remove or change the type signature. See go.dev/issue/67401. go:linkname reflect_maplen reflect.maplen

func reflect_maplen(h *hmap) int

reflect_memclrNoHeapPointers function #

go:linkname reflect_memclrNoHeapPointers reflect.memclrNoHeapPointers

func reflect_memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)

reflect_memmove function #

go:linkname reflect_memmove reflect.memmove

func reflect_memmove(to unsafe.Pointer, from unsafe.Pointer, n uintptr)

reflect_resolveNameOff function #

reflect_resolveNameOff resolves a name offset from a base pointer. reflect_resolveNameOff is for package reflect, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/agiledragon/gomonkey/v2 Do not remove or change the type signature. See go.dev/issue/67401. go:linkname reflect_resolveNameOff reflect.resolveNameOff

func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer

reflect_resolveTextOff function #

reflect_resolveTextOff resolves a function pointer offset from a base type. reflect_resolveTextOff is for package reflect, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/agiledragon/gomonkey/v2 Do not remove or change the type signature. See go.dev/issue/67401. go:linkname reflect_resolveTextOff reflect.resolveTextOff

func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer

reflect_resolveTypeOff function #

reflect_resolveTypeOff resolves an *rtype offset from a base type. reflect_resolveTypeOff is meant for package reflect, but widely used packages access it using linkname. Notable members of the hall of shame include: - gitee.com/quant1x/gox - github.com/modern-go/reflect2 - github.com/v2pro/plz - github.com/timandy/routine Do not remove or change the type signature. See go.dev/issue/67401. go:linkname reflect_resolveTypeOff reflect.resolveTypeOff

func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer

reflect_rselect function #

go:linkname reflect_rselect reflect.rselect

func reflect_rselect(cases []runtimeSelect) (int, bool)

reflect_typedarrayclear function #

go:linkname reflect_typedarrayclear reflect.typedarrayclear

func reflect_typedarrayclear(typ *_type, ptr unsafe.Pointer, len int)

reflect_typedmemclr function #

reflect_typedmemclr is meant for package reflect, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/ugorji/go/codec Do not remove or change the type signature. See go.dev/issue/67401. go:linkname reflect_typedmemclr reflect.typedmemclr

func reflect_typedmemclr(typ *_type, ptr unsafe.Pointer)

reflect_typedmemclrpartial function #

go:linkname reflect_typedmemclrpartial reflect.typedmemclrpartial

func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off uintptr, size uintptr)

reflect_typedmemmove function #

reflect_typedmemmove is meant for package reflect, but widely used packages access it using linkname. Notable members of the hall of shame include: - gitee.com/quant1x/gox - github.com/goccy/json - github.com/modern-go/reflect2 - github.com/ugorji/go/codec - github.com/v2pro/plz Do not remove or change the type signature. See go.dev/issue/67401. go:linkname reflect_typedmemmove reflect.typedmemmove

func reflect_typedmemmove(typ *_type, dst unsafe.Pointer, src unsafe.Pointer)

reflect_typedslicecopy function #

reflect_typedslicecopy is meant for package reflect, but widely used packages access it using linkname. Notable members of the hall of shame include: - gitee.com/quant1x/gox - github.com/modern-go/reflect2 - github.com/RomiChan/protobuf - github.com/segmentio/encoding - github.com/v2pro/plz Do not remove or change the type signature. See go.dev/issue/67401. go:linkname reflect_typedslicecopy reflect.typedslicecopy

func reflect_typedslicecopy(elemType *_type, dst slice, src slice) int

reflect_typehash function #

go:linkname reflect_typehash reflect.typehash

func reflect_typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr

reflect_unsafe_New function #

reflect_unsafe_New is meant for package reflect, but widely used packages access it using linkname. Notable members of the hall of shame include: - gitee.com/quant1x/gox - github.com/goccy/json - github.com/modern-go/reflect2 - github.com/v2pro/plz Do not remove or change the type signature. See go.dev/issue/67401. go:linkname reflect_unsafe_New reflect.unsafe_New

func reflect_unsafe_New(typ *_type) unsafe.Pointer

reflect_unsafe_NewArray function #

reflect_unsafe_NewArray is meant for package reflect, but widely used packages access it using linkname. Notable members of the hall of shame include: - gitee.com/quant1x/gox - github.com/bytedance/sonic - github.com/goccy/json - github.com/modern-go/reflect2 - github.com/segmentio/encoding - github.com/segmentio/kafka-go - github.com/v2pro/plz Do not remove or change the type signature. See go.dev/issue/67401. go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray

func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer

reflect_unsafeslice function #

go:linkname reflect_unsafeslice reflect.unsafeslice

func reflect_unsafeslice(et *_type, ptr unsafe.Pointer, len int)

reflect_verifyNotInHeapPtr function #

reflect_verifyNotInHeapPtr reports whether converting the not-in-heap pointer into an unsafe.Pointer is ok. go:linkname reflect_verifyNotInHeapPtr reflect.verifyNotInHeapPtr

func reflect_verifyNotInHeapPtr(p uintptr) bool

reflectcall function #

reflectcall calls fn with arguments described by stackArgs, stackArgsSize, frameSize, and regArgs. Arguments passed on the stack and space for return values passed on the stack must be laid out at the space pointed to by stackArgs (with total length stackArgsSize) according to the ABI. stackRetOffset must be some value <= stackArgsSize that indicates the offset within stackArgs where the return value space begins. frameSize is the total size of the argument frame at stackArgs and must therefore be >= stackArgsSize. It must include additional space for spilling register arguments for stack growth and preemption. TODO(mknyszek): Once we don't need the additional spill space, remove frameSize, since frameSize will be redundant with stackArgsSize. Arguments passed in registers must be laid out in regArgs according to the ABI. regArgs will hold any return values passed in registers after the call. reflectcall copies stack arguments from stackArgs to the goroutine stack, and then copies back stackArgsSize-stackRetOffset bytes back to the return space in stackArgs once fn has completed. It also "unspills" argument registers from regArgs before calling fn, and spills them back into regArgs immediately following the call to fn. If there are results being returned on the stack, the caller should pass the argument frame type as stackArgsType so that reflectcall can execute appropriate write barriers during the copy. reflectcall expects regArgs.ReturnIsPtr to be populated indicating which registers on the return path will contain Go pointers. It will then store these pointers in regArgs.Ptrs such that they are visible to the GC. Package reflect passes a frame type. In package runtime, there is only one call that copies results back, in callbackWrap in syscall_windows.go, and it does NOT pass a frame type, meaning there are no write barriers invoked. See that call site for justification. Package reflect accesses this symbol through a linkname. 
Arguments passed through to reflectcall do not escape. The type is used only in a very limited callee of reflectcall, the stackArgs are copied, and regArgs is only used in the reflectcall frame. go:noescape

func reflectcall(stackArgsType *_type, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)

reflectcallmove function #

reflectcallmove is invoked by reflectcall to copy the return values out of the stack and into the heap, invoking the necessary write barriers. dst, src, and size describe the return value area to copy. typ describes the entire frame (not just the return values). typ may be nil, which indicates write barriers are not needed. It must be nosplit and must only call nosplit functions because the stack map of reflectcall is wrong. go:nosplit

func reflectcallmove(typ *_type, dst unsafe.Pointer, src unsafe.Pointer, size uintptr, regs *abi.RegArgs)

reflectlite_chanlen function #

go:linkname reflectlite_chanlen internal/reflectlite.chanlen

func reflectlite_chanlen(c *hchan) int

reflectlite_ifaceE2I function #

go:linkname reflectlite_ifaceE2I internal/reflectlite.ifaceE2I

func reflectlite_ifaceE2I(inter *interfacetype, e eface, dst *iface)

reflectlite_maplen function #

go:linkname reflectlite_maplen internal/reflectlite.maplen

func reflectlite_maplen(m *maps.Map) int

reflectlite_maplen function #

go:linkname reflectlite_maplen internal/reflectlite.maplen

func reflectlite_maplen(h *hmap) int

reflectlite_resolveNameOff function #

reflectlite_resolveNameOff resolves a name offset from a base pointer. go:linkname reflectlite_resolveNameOff internal/reflectlite.resolveNameOff

func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer

reflectlite_resolveTypeOff function #

reflectlite_resolveTypeOff resolves an *rtype offset from a base type. go:linkname reflectlite_resolveTypeOff internal/reflectlite.resolveTypeOff

func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer

reflectlite_typedmemmove function #

go:linkname reflectlite_typedmemmove internal/reflectlite.typedmemmove

func reflectlite_typedmemmove(typ *_type, dst unsafe.Pointer, src unsafe.Pointer)

reflectlite_unsafe_New function #

go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New

func reflectlite_unsafe_New(typ *_type) unsafe.Pointer

refreshPinnerBits method #

refreshPinnerBits replaces pinnerBits with a fresh copy in the arenas for the next GC cycle. If the span does not contain any pinned objects, its pinnerBits is set to nil.

func (s *mspan) refreshPinnerBits()

reginit function #

func reginit()

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *context64

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *mcontextt

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *mcontextt

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *mcontextt

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *mcontext

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *mcontext

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *sigcontext

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *sigcontext

regs method #

func (c *sigctxt) regs() *sigcontext

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *mcontext

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *sigcontext

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *sigcontext

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *mcontext

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *sigcontext

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *mcontextt

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *sigcontext

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *sigcontext

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *sigcontext

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *sigcontext

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *sigcontext

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *sigcontext

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *mcontext

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *mcontext

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *sigcontext

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *regs64

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *sigcontext

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *sigcontext

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *regs64

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *sigcontext

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *ptregs

regs method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) regs() *mcontext

release method #

release indicates that the writer is done modifying the delta. The value returned by the corresponding acquire must no longer be accessed or modified after release is called. The caller's P must not change between acquire and release. This also means that the caller should not acquire a P or release its P in between. nosplit because a stack growth in this function could lead to a stack allocation that causes another acquire before this operation has completed. go:nosplit

func (m *consistentHeapStats) release()

releaseAll method #

func (c *mcache) releaseAll()

releaseLockRankAndM function #

releaseLockRankAndM releases a rank which is not associated with a mutex lock. To maintain the invariant that an M with m.locks==0 does not hold any lock-like resources, it also releases the M. This function may be called in nosplit context and thus must be nosplit. go:nosplit

func releaseLockRankAndM(rank lockRank)

releaseLockRankAndM function #

This function may be called in nosplit context and thus must be nosplit. go:nosplit

func releaseLockRankAndM(rank lockRank)

releaseSudog function #

go:nosplit

func releaseSudog(s *sudog)

releasem function #

go:nosplit

func releasem(mp *m)

releasep function #

Disassociate p and the current m.

func releasep() *p

releasepNoTrace function #

Disassociate p and the current m without tracing an event.

func releasepNoTrace() *p

remove method #

func (list *mSpanList) remove(span *mspan)

removeGreaterEqual method #

removeGreaterEqual removes all addresses in a that are greater than or equal to addr and returns the new range.

func (a addrRange) removeGreaterEqual(addr uintptr) addrRange

removeGreaterEqual method #

removeGreaterEqual removes the ranges of a which are above addr, and additionally splits any range containing addr.

func (a *addrRanges) removeGreaterEqual(addr uintptr)

removeIdleMarkWorker method #

removeIdleMarkWorker must be called when a new idle mark worker stops executing.

func (c *gcControllerState) removeIdleMarkWorker()

removeLast method #

removeLast removes and returns the highest-addressed contiguous range of a, or the last nBytes of that range, whichever is smaller. If a is empty, it returns an empty range.

func (a *addrRanges) removeLast(nBytes uintptr) addrRange

removefinalizer function #

Removes the finalizer (if any) from the object p.

func removefinalizer(p unsafe.Pointer)

removespecial function #

Removes the Special record of the given kind for the object p. Returns the record if the record existed, nil otherwise. The caller must FixAlloc_Free the result.

func removespecial(p unsafe.Pointer, kind uint8) *special

removesub function #

func removesub(i int)

reparsedebugvars function #

reparsedebugvars reparses the runtime's debug variables because the environment variable has been changed to env.

func reparsedebugvars(env string)

reportZombies method #

reportZombies reports any marked but free objects in s and throws. This generally means one of the following: 1. User code converted a pointer to a uintptr and then back unsafely, and a GC ran while the uintptr was the only reference to an object. 2. User code (or a compiler bug) constructed a bad pointer that points to a free slot, often a past-the-end pointer. 3. The GC two cycles ago missed a pointer and freed a live object, but it was still live in the last cycle, so this GC cycle found a pointer to that object and marked it.

func (s *mspan) reportZombies()

reset method #

reset resets the time when a timer should fire. If used for an inactive timer, the timer will become active. Reports whether the timer was active and was stopped.

func (t *timer) reset(when int64, period int64) bool

reset method #

reset clears the string table and flushes any buffers it has. Must be called only once the caller is certain nothing else will be added to this table.

func (t *traceStringTable) reset(gen uintptr)

reset method #

reset empties b by resetting its next and end pointers.

func (b *wbBuf) reset()

reset method #

reset resets the controller state, except for controller error flags.

func (c *piController) reset()

reset method #

reset clears the headTailIndex to (0, 0).

func (h *atomicHeadTailIndex) reset()

reset method #

func (ord *randomOrder) reset(count uint32)

reset method #

reset drops all allocated memory from the table and resets it. The caller must ensure that there are no put operations executing concurrently with this function.

func (tab *traceMap) reset()

reset method #

reset resets a spanSet which is empty. It will also clean up any left over blocks. Throws if the buf is not empty. reset may not be called concurrently with any other operations on the span set.

func (b *spanSet) reset()

reset method #

reset sets up the activeSweep for the next sweep cycle. The world must be stopped.

func (a *activeSweep) reset()

reset method #

reset resets the gTraceState for a new goroutine.

func (s *gTraceState) reset()

resetCapacity method #

resetCapacity updates the capacity based on GOMAXPROCS. Must not be called while the GC is enabled. It is safe to call concurrently with other operations.

func (l *gcCPULimiterState) resetCapacity(now int64, nprocs int32)

resetForSleep function #

resetForSleep is called after the goroutine is parked for timeSleep. We can't call timer.reset in timeSleep itself because if this is a short sleep and there are many goroutines then the P can wind up running the timer function, goroutineReady, before the goroutine has been parked.

func resetForSleep(gp *g, _ unsafe.Pointer) bool

resetLive method #

resetLive sets up the controller state for the next mark phase after the end of the previous one. Must be called after endCycle and before commit, before the world is started. The world must be stopped.

func (c *gcControllerState) resetLive(bytesMarked uint64)

resetMemoryDataView function #

func resetMemoryDataView()

resetMemoryDataView function #

resetMemoryDataView signals the JS front-end that WebAssembly's memory.grow instruction has been used. This allows the front-end to replace the old DataView object with a new one. go:wasmimport gojs runtime.resetMemoryDataView

func resetMemoryDataView()

resetTimer function #

resetTimer resets an inactive timer, adding it to the timer heap. Reports whether the timer was modified before it was run. go:linkname resetTimer time.resetTimer

func resetTimer(t *timeTimer, when int64, period int64) bool

resetspinning function #

func resetspinning()

resolveInternal method #

func (u *inlineUnwinder) resolveInternal(pc uintptr) inlineFrame

resolveInternal method #

resolveInternal fills in u.frame based on u.frame.fn, pc, and sp. innermost indicates that this is the first resolve on this stack. If innermost is set, isSyscall indicates that the PC/SP was retrieved from gp.syscall*; this is otherwise ignored. On entry, u.frame contains: - fn is the running function. - pc is the PC in the running function. - sp is the stack pointer at that program counter. - For the innermost frame on LR machines, lr is the program counter that called fn. On return, u.frame contains: - fp is the stack pointer of the caller. - lr is the program counter that called fn. - varp, argp, and continpc are populated for the current frame. If fn is a stack-jumping function, resolveInternal can change the entire frame state to follow that stack jump. This is internal to unwinder.

func (u *unwinder) resolveInternal(innermost bool, isSyscall bool)

resolveNameOff function #

func resolveNameOff(ptrInModule unsafe.Pointer, off nameOff) name

resolveTypeOff function #

func resolveTypeOff(ptrInModule unsafe.Pointer, off typeOff) *_type

restoreGsignalStack function #

restoreGsignalStack restores the gsignal stack to the value it had before entering the signal handler. go:nosplit go:nowritebarrierrec

func restoreGsignalStack(st *gsignalStack)

restoreSIGSYS function #

go:linkname restoreSIGSYS os.restoreSIGSYS

func restoreSIGSYS()

resumeG function #

resumeG undoes the effects of suspendG, allowing the suspended goroutine to continue from its current safe-point.

func resumeG(state suspendGState)

retake function #

func retake(now int64) uint32

retpolineAX function #

Retpolines, used by -spectre=ret flag in cmd/asm, cmd/compile.

func retpolineAX()

retpolineBP function #

func retpolineBP()

retpolineBX function #

func retpolineBX()

retpolineCX function #

func retpolineCX()

retpolineDI function #

func retpolineDI()

retpolineDX function #

func retpolineDX()

retpolineR10 function #

func retpolineR10()

retpolineR11 function #

func retpolineR11()

retpolineR12 function #

func retpolineR12()

retpolineR13 function #

func retpolineR13()

retpolineR14 function #

func retpolineR14()

retpolineR15 function #

func retpolineR15()

retpolineR8 function #

func retpolineR8()

retpolineR9 function #

func retpolineR9()

retpolineSI function #

func retpolineSI()

retryOnEAGAIN function #

retryOnEAGAIN retries a function until it does not return EAGAIN. It will use an increasing delay between calls, and retry up to 20 times. The function argument is expected to return an errno value, and retryOnEAGAIN will return any errno value other than EAGAIN. If all retries return EAGAIN, then retryOnEAGAIN will return EAGAIN.

func retryOnEAGAIN(fn func() int32) int32

return0 function #

return0 is a stub used to return 0 from deferproc. It is called at the very end of deferproc to signal the calling Go function that it should not jump to deferreturn. in asm_*.s

func return0()

revise method #

revise updates the assist ratio during the GC cycle to account for improved estimates. This should be called whenever gcController.heapScan, gcController.heapLive, or if any inputs to gcController.heapGoal are updated. It is safe to call concurrently, but it may race with other calls to revise. The result of this race is that the two assist ratio values may not line up or may be stale. In practice this is OK because the assist ratio moves slowly throughout a GC cycle, and the assist ratio is a best-effort heuristic anyway. Furthermore, no part of the heuristic depends on the two assist ratio values being exact reciprocals of one another, since the two values are used to convert values from different sources. The worst case result of this raciness is that we may miss a larger shift in the ratio (say, if we decide to pace more aggressively against the hard heap goal) but even this "hard goal" is best-effort (see #40460). The dedicated GC should ensure we don't exceed the hard goal by too much in the rare case we do exceed it. It should only be called when gcBlackenEnabled != 0 (because this is when assists are enabled and the necessary statistics are available).

func (c *gcControllerState) revise()

rflags method #

func (c *sigctxt) rflags() uint64

rflags method #

func (c *sigctxt) rflags() uint64

rflags method #

func (c *sigctxt) rflags() uint64

rflags method #

func (c *sigctxt) rflags() uint64

rflags method #

func (c *sigctxt) rflags() uint64

rflags method #

func (c *sigctxt) rflags() uint64

rflags method #

func (c *sigctxt) rflags() uint64

rfork function #

func rfork(flags int32) int32

rip method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) rip() uint64

rip method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) rip() uint64

rip method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) rip() uint64

rip method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) rip() uint64

rip method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) rip() uint64

rip method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) rip() uint64

rip method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) rip() uint64

rip method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) rip() uint64

rlock method #

rlock locks rw for reading.

func (rw *rwmutex) rlock()

rootFor method #

func (t *semTable) rootFor(addr *uint32) *semaRoot

rotateLeft method #

rotateLeft rotates the tree rooted at node x. turning (x a (y b c)) into (y (x a b) c).

func (root *semaRoot) rotateLeft(x *sudog)

rotateRight method #

rotateRight rotates the tree rooted at node y. turning (y (x a b) c) into (x a (y b c)).

func (root *semaRoot) rotateRight(y *sudog)

round2 function #

round x up to a power of 2.

func round2(x int32) int32

roundupsize function #

Returns size of the memory block that mallocgc will allocate if you ask for the size, minus any inline space for metadata.

func roundupsize(size uintptr, noscan bool) (reqSize uintptr)

rsi method #

func (c *sigctxt) rsi() uint64

rsi method #

func (c *sigctxt) rsi() uint64

rsi method #

func (c *sigctxt) rsi() uint64

rsi method #

func (c *sigctxt) rsi() uint64

rsi method #

func (c *sigctxt) rsi() uint64

rsi method #

func (c *sigctxt) rsi() uint64

rsi method #

func (c *sigctxt) rsi() uint64

rsp method #

func (c *sigctxt) rsp() uint64

rsp method #

func (c *sigctxt) rsp() uint64

rsp method #

func (c *sigctxt) rsp() uint64

rsp method #

func (c *sigctxt) rsp() uint64

rsp method #

func (c *sigctxt) rsp() uint64

rsp method #

func (c *sigctxt) rsp() uint64

rsp method #

func (c *sigctxt) rsp() uint64

rt0_go function #

func rt0_go()

rt_sigaction function #

rt_sigaction is implemented in assembly. go:noescape

func rt_sigaction(sig uintptr, new *sigactiont, old *sigactiont, size uintptr) int32

rtsigprocmask function #

go:noescape

func rtsigprocmask(how int32, new *sigset, old *sigset, size int32)

rtype method #

rtype returns a traceArg representing typ which may be passed to write.

func (tl traceLocker) rtype(typ *abi.Type) traceArg

run method #

run is the body of the main scavenging loop. Returns the number of bytes released and the estimated time spent releasing those bytes. Must be run on the scavenger goroutine.

func (s *scavengerState) run() (released uintptr, worked float64)

run method #

run examines the first timer in ts. If it is ready based on now, it runs the timer and removes or updates it. Returns 0 if it ran a timer, -1 if there are no more timers, or the time when the first timer should run. The caller must have locked ts. If a timer is run, this will temporarily unlock ts. go:systemstack

func (ts *timers) run(now int64) int64

runExitHooks function #

func runExitHooks(code int)

runGCProg function #

runGCProg returns the number of 1-bit entries written to memory.

func runGCProg(prog *byte, dst *byte) uintptr

runPerThreadSyscall function #

go:nosplit

func runPerThreadSyscall()

runPerThreadSyscall function #

runPerThreadSyscall runs perThreadSyscall for this M if required. This function throws if the system call returns with anything other than the expected values. go:nosplit

func runPerThreadSyscall()

runPerThreadSyscall function #

go:nosplit

func runPerThreadSyscall()

runPerThreadSyscall function #

go:nosplit

func runPerThreadSyscall()

runPerThreadSyscall function #

go:nosplit

func runPerThreadSyscall()

runPerThreadSyscall function #

go:nosplit

func runPerThreadSyscall()

runPerThreadSyscall function #

go:nosplit

func runPerThreadSyscall()

runPerThreadSyscall function #

go:nosplit

func runPerThreadSyscall()

runSafePointFn function #

runSafePointFn runs the safe point function, if any, for this P. This should be called like if getg().m.p.runSafePointFn != 0 { runSafePointFn() } runSafePointFn must be checked on any transition in to _Pidle or _Psyscall to avoid a race where forEachP sees that the P is running just before the P goes into _Pidle/_Psyscall and neither forEachP nor the P run the safe-point function.

func runSafePointFn()

runfinq function #

This is the goroutine that runs all of the finalizers and cleanups.

func runfinq()

runlock method #

runlock undoes a single rlock call on rw.

func (rw *rwmutex) runlock()

runqdrain function #

runqdrain drains the local runnable queue of pp and returns all goroutines in it. Executed only by the owner P.

func runqdrain(pp *p) (drainQ gQueue, n uint32)

runqempty function #

runqempty reports whether pp has no Gs on its local run queue. It never returns true spuriously.

func runqempty(pp *p) bool

runqget function #

Get g from local runnable queue. If inheritTime is true, gp should inherit the remaining time in the current time slice. Otherwise, it should start a new time slice. Executed only by the owner P.

func runqget(pp *p) (gp *g, inheritTime bool)

runqgrab function #

Grabs a batch of goroutines from pp's runnable queue into batch. Batch is a ring buffer starting at batchHead. Returns number of grabbed goroutines. Can be executed by any P.

func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32

runqput function #

runqput tries to put g on the local runnable queue. If next is false, runqput adds g to the tail of the runnable queue. If next is true, runqput puts g in the pp.runnext slot. If the run queue is full, runqput puts g on the global queue. Executed only by the owner P.

func runqput(pp *p, gp *g, next bool)

runqputbatch function #

runqputbatch tries to put all the G's on q on the local runnable queue. If the queue is full, they are put on the global queue; in that case this will temporarily acquire the scheduler lock. Executed only by the owner P.

func runqputbatch(pp *p, q *gQueue, qsize int)

runqputslow function #

Put g and a batch of work from local runnable queue on global queue. Executed only by the owner P.

func runqputslow(pp *p, gp *g, h uint32, t uint32) bool

runqsteal function #

Steal half of elements from local runnable queue of p2 and put onto local runnable queue of pp. Returns one of the stolen elements (or nil if failed).

func runqsteal(pp *p, p2 *p, stealRunNextG bool) *g

runtime_FrameStartLine function #

runtime_FrameStartLine returns the start line of the function in a Frame. runtime_FrameStartLine should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/grafana/pyroscope-go/godeltaprof Do not remove or change the type signature. See go.dev/issue/67401. go:linkname runtime_FrameStartLine runtime/pprof.runtime_FrameStartLine

func runtime_FrameStartLine(f *Frame) int

runtime_FrameSymbolName function #

runtime_FrameSymbolName returns the full symbol name of the function in a Frame. For generic functions this differs from f.Function in that this doesn't replace the shape name to "...". runtime_FrameSymbolName should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/grafana/pyroscope-go/godeltaprof Do not remove or change the type signature. See go.dev/issue/67401. go:linkname runtime_FrameSymbolName runtime/pprof.runtime_FrameSymbolName

func runtime_FrameSymbolName(f *Frame) string

runtime_debug_WriteHeapDump function #

go:linkname runtime_debug_WriteHeapDump runtime/debug.WriteHeapDump

func runtime_debug_WriteHeapDump(fd uintptr)

runtime_debug_freeOSMemory function #

go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory

func runtime_debug_freeOSMemory()

runtime_expandFinalInlineFrame function #

runtime_expandFinalInlineFrame expands the final pc in stk to include all "callers" if pc is inline. runtime_expandFinalInlineFrame should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/grafana/pyroscope-go/godeltaprof - github.com/pyroscope-io/godeltaprof Do not remove or change the type signature. See go.dev/issue/67401. go:linkname runtime_expandFinalInlineFrame runtime/pprof.runtime_expandFinalInlineFrame

func runtime_expandFinalInlineFrame(stk []uintptr) []uintptr

runtime_getProfLabel function #

runtime_getProfLabel should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/cloudwego/localsession Do not remove or change the type signature. See go.dev/issue/67401. go:linkname runtime_getProfLabel runtime/pprof.runtime_getProfLabel

func runtime_getProfLabel() unsafe.Pointer

runtime_ignoreHangup function #

go:linkname runtime_ignoreHangup internal/poll.runtime_ignoreHangup

func runtime_ignoreHangup()

runtime_pprof_readProfile function #

readProfile, provided to runtime/pprof, returns the next chunk of binary CPU profiling stack trace data, blocking until data is available. If profiling is turned off and all the profile data accumulated while it was on has been returned, readProfile returns eof=true. The caller must save the returned data and tags before calling readProfile again. The returned data contains a whole number of records, and tags contains exactly one entry per record. runtime_pprof_readProfile should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/pyroscope-io/pyroscope Do not remove or change the type signature. See go.dev/issue/67401. go:linkname runtime_pprof_readProfile runtime/pprof.readProfile

func runtime_pprof_readProfile() ([]uint64, []unsafe.Pointer, bool)

runtime_setProfLabel function #

runtime_setProfLabel should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/cloudwego/localsession Do not remove or change the type signature. See go.dev/issue/67401. go:linkname runtime_setProfLabel runtime/pprof.runtime_setProfLabel

func runtime_setProfLabel(labels unsafe.Pointer)

runtime_unignoreHangup function #

go:linkname runtime_unignoreHangup internal/poll.runtime_unignoreHangup

func runtime_unignoreHangup(sig string)

s method #

go:nosplit

func (l *dloggerImpl) s(x string) *dloggerImpl

s method #

go:nosplit

func (l dloggerFake) s(x string) dloggerFake

s0 method #

func (c *sigctxt) s0() uint64

s0 method #

func (c *sigctxt) s0() uint64

s0 method #

func (c *sigctxt) s0() uint64

s1 method #

func (c *sigctxt) s1() uint64

s1 method #

func (c *sigctxt) s1() uint64

s1 method #

func (c *sigctxt) s1() uint64

s10 method #

func (c *sigctxt) s10() uint64

s10 method #

func (c *sigctxt) s10() uint64

s10 method #

func (c *sigctxt) s10() uint64

s11 method #

func (c *sigctxt) s11() uint64

s11 method #

func (c *sigctxt) s11() uint64

s11 method #

func (c *sigctxt) s11() uint64

s2 method #

func (c *sigctxt) s2() uint64

s2 method #

func (c *sigctxt) s2() uint64

s2 method #

func (c *sigctxt) s2() uint64

s3 method #

func (c *sigctxt) s3() uint64

s3 method #

func (c *sigctxt) s3() uint64

s3 method #

func (c *sigctxt) s3() uint64

s4 method #

func (c *sigctxt) s4() uint64

s4 method #

func (c *sigctxt) s4() uint64

s4 method #

func (c *sigctxt) s4() uint64

s5 method #

func (c *sigctxt) s5() uint64

s5 method #

func (c *sigctxt) s5() uint64

s5 method #

func (c *sigctxt) s5() uint64

s6 method #

func (c *sigctxt) s6() uint64

s6 method #

func (c *sigctxt) s6() uint64

s6 method #

func (c *sigctxt) s6() uint64

s7 method #

func (c *sigctxt) s7() uint64

s7 method #

func (c *sigctxt) s7() uint64

s7 method #

func (c *sigctxt) s7() uint64

s8 method #

func (c *sigctxt) s8() uint64

s8 method #

func (c *sigctxt) s8() uint64

s8 method #

func (c *sigctxt) s8() uint64

s9 method #

func (c *sigctxt) s9() uint64

s9 method #

func (c *sigctxt) s9() uint64

s9 method #

func (c *sigctxt) s9() uint64

sameSizeGrow method #

sameSizeGrow reports whether the current growth is to a map of the same size.

func (h *hmap) sameSizeGrow() bool

sameSizeGrowForIssue69110Test function #

go:linkname sameSizeGrowForIssue69110Test

func sameSizeGrowForIssue69110Test(h *hmap) bool

save function #

save updates getg().sched to refer to pc and sp so that a following gogo will restore pc and sp. save must not have write barriers because invoking a write barrier can clobber getg().sched. go:nosplit go:nowritebarrierrec

func save(pc uintptr, sp uintptr, bp uintptr)

saveAncestors function #

saveAncestors copies previous ancestors of the given caller g and includes info for the current caller into a new set of tracebacks for a g being created.

func saveAncestors(callergp *g) *[]ancestorInfo

saveBlockEventStack function #

func saveBlockEventStack(cycles int64, rate int64, stk []uintptr, which bucketType)

save_g function #

func save_g()

save_g function #

func save_g()

save_g function #

func save_g()

save_g function #

func save_g()

save_g function #

func save_g()

save_g function #

func save_g()

save_g function #

func save_g()

save_g function #

func save_g()

saveblockevent function #

saveblockevent records a profile event of the type specified by which. cycles is the quantity associated with this event and rate is the sampling rate, used to adjust the cycles value in the manner determined by the profile type. skip is the number of frames to omit from the traceback associated with the event. The traceback will be recorded from the stack of the goroutine associated with the current m. skip should be positive if this event is recorded from the current stack (e.g. when this is not called from a system stack)

func saveblockevent(cycles int64, rate int64, skip int, which bucketType)

saveg function #

func saveg(pc uintptr, sp uintptr, gp *g, r *profilerecord.StackRecord, pcbuf []uintptr)

savelr method #

func (c *sigctxt) savelr(x uintptr)

savelr method #

func (c *sigctxt) savelr(x uintptr)

savelr method #

func (c *sigctxt) savelr(x uintptr)

sbrk function #

func sbrk(n uintptr) unsafe.Pointer

sbrk function #

func sbrk(n uintptr) unsafe.Pointer

sbrk0 function #

func sbrk0() uintptr

sbrk0 function #

sbrk0 returns the current process brk, or 0 if not implemented.

func sbrk0() uintptr

scanConservative function #

scanConservative scans block [b, b+n) conservatively, treating any pointer-like value in the block as a pointer. If ptrmask != nil, only words that are marked in ptrmask are considered as potential pointers. If state != nil, it's assumed that [b, b+n) is a block in the stack and may contain pointers to stack objects.

func scanConservative(b uintptr, n uintptr, ptrmask *uint8, gcw *gcWork, state *stackScanState)

scanblock function #

scanblock scans b as scanobject would, but using an explicit pointer bitmap instead of the heap bitmap. This is used to scan non-heap roots, so it does not update gcw.bytesMarked or gcw.heapScanWork. If stk != nil, possible stack pointers are also reported to stk.putPtr. go:nowritebarrier

func scanblock(b0 uintptr, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState)

scanframeworker function #

Scan a stack frame: local variables and function arguments/results. go:nowritebarrier

func scanframeworker(frame *stkframe, state *stackScanState, gcw *gcWork)

scanobject function #

scanobject scans the object starting at b, adding pointers to gcw. b must point to the beginning of a heap object or an oblet. scanobject consults the GC bitmap for the pointer mask and the spans for the size of the object. go:nowritebarrier

func scanobject(b uintptr, gcw *gcWork)

scanstack function #

scanstack scans gp's stack, greying all pointers found on the stack. Returns the amount of scan work performed, but doesn't update gcController.stackScanWork or flush any credit. Any background credit produced by this function should be flushed by its caller. scanstack itself can't safely flush because it may result in trying to wake up a goroutine that was just scanned, resulting in a self-deadlock. scanstack will also shrink the stack if it is safe to do so. If it is not, it schedules a stack shrink for the next synchronous safe point. scanstack is marked go:systemstack because it must not be preempted while using a workbuf. go:nowritebarrier go:systemstack

func scanstack(gp *g, gcw *gcWork) int64

scavenge method #

scavenge scavenges nbytes worth of free pages, starting with the highest address first. Successive calls continue from where it left off until the heap is exhausted. force makes all memory available to scavenge, ignoring huge page heuristics. Returns the amount of memory scavenged in bytes. scavenge always tries to scavenge nbytes worth of memory, and will only fail to do so if the heap is exhausted for now.

func (p *pageAlloc) scavenge(nbytes uintptr, shouldStop func() bool, force bool) uintptr

scavengeAll method #

scavengeAll acquires the heap lock (blocking any additional manipulation of the page allocator) and iterates over the whole heap, scavenging every free page available. Must run on the system stack because it acquires the heap lock. go:systemstack

func (h *mheap) scavengeAll()

scavengeOne method #

scavengeOne walks over the chunk at chunk index ci and searches for a contiguous run of pages to scavenge. It will try to scavenge at most max bytes at once, but may scavenge more to avoid breaking huge pages. Once it scavenges some memory it returns how much it scavenged in bytes. searchIdx is the page index to start searching from in ci. Returns the number of bytes scavenged. Must run on the systemstack because it acquires p.mheapLock. go:systemstack

func (p *pageAlloc) scavengeOne(ci chunkIdx, searchIdx uint, max uintptr) uintptr

schedEnableUser function #

schedEnableUser enables or disables the scheduling of user goroutines. This does not stop already running user goroutines, so the caller should first stop the world when disabling user goroutines.

func schedEnableUser(enable bool)

schedEnabled function #

schedEnabled reports whether gp should be scheduled. It returns false if scheduling of gp is disabled. sched.lock must be held.

func schedEnabled(gp *g) bool

sched_getaffinity function #

go:noescape

func sched_getaffinity(pid uintptr, len uintptr, buf *byte) int32

sched_yield function #

go:wasmimport wasi_snapshot_preview1 sched_yield

func sched_yield() errno

sched_yield_trampoline function #

func sched_yield_trampoline()

schedinit function #

The bootstrap sequence is: call osinit call schedinit make & queue new G call runtime·mstart The new G calls runtime·main.

func schedinit()

schedtrace function #

func schedtrace(detailed bool)

schedule function #

One round of scheduler: find a runnable goroutine and execute it. Never returns.

func schedule()

scheduleTimeoutEvent function #

scheduleTimeoutEvent tells the WebAssembly environment to trigger an event after ms milliseconds. It returns a timer id that can be used with clearTimeoutEvent. go:wasmimport gojs runtime.scheduleTimeoutEvent

func scheduleTimeoutEvent(ms int64) int32

secure function #

func secure()

secure function #

func secure()

secureEnv function #

func secureEnv()

seek function #

func seek(fd int32, offset int64, whence int32) int64

sehhandler function #

sehhandler is reached as part of the SEH chain. It is nosplit for the same reason as exceptionhandler. go:nosplit

func sehhandler(_ *exceptionrecord, _ uint64, _ *context, dctxt *_DISPATCHER_CONTEXT) int32

sehtramp function #

func sehtramp()

selectgo function #

selectgo implements the select statement. cas0 points to an array of type [ncases]scase, and order0 points to an array of type [2*ncases]uint16 where ncases must be <= 65536. Both reside on the goroutine's stack (regardless of any escaping in selectgo). For race detector builds, pc0 points to an array of type [ncases]uintptr (also on the stack); for other builds, it's set to nil. selectgo returns the index of the chosen scase, which matches the ordinal position of its respective select{recv,send,default} call. Also, if the chosen scase was a receive operation, it reports whether a value was received.

func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends int, nrecvs int, block bool) (int, bool)

selectnbrecv function #

compiler implements select { case v, ok = <-c: ... foo default: ... bar } as if selected, ok = selectnbrecv(&v, c); selected { ... foo } else { ... bar }

func selectnbrecv(elem unsafe.Pointer, c *hchan) (selected bool, received bool)

selectnbsend function #

compiler implements select { case c <- v: ... foo default: ... bar } as if selectnbsend(c, v) { ... foo } else { ... bar }

func selectnbsend(c *hchan, elem unsafe.Pointer) (selected bool)

selectsetpc function #

func selectsetpc(pc *uintptr)

sellock function #

func sellock(scases []scase, lockorder []uint16)

selparkcommit function #

func selparkcommit(gp *g, _ unsafe.Pointer) bool

selunlock function #

func selunlock(scases []scase, lockorder []uint16)

sem_init function #

go:nosplit

func sem_init(sem *semt, pshared int32, value uint32) int32

sem_init function #

go:nosplit

func sem_init(sem *semt, pshared int32, value uint32) int32

sem_post function #

go:nosplit

func sem_post(sem *semt) int32

sem_post function #

go:nosplit

func sem_post(sem *semt) int32

sem_reltimedwait_np function #

go:nosplit

func sem_reltimedwait_np(sem *semt, timeout *timespec) int32

sem_timedwait function #

go:nosplit

func sem_timedwait(sem *semt, timeout *timespec) (int32, int32)

sem_wait function #

go:nosplit

func sem_wait(sem *semt) int32

sem_wait function #

go:nosplit

func sem_wait(sem *semt) (int32, int32)

semacquire function #

Called from runtime.

func semacquire(addr *uint32)

semacquire1 function #

func semacquire1(addr *uint32, lifo bool, profile semaProfileFlags, skipframes int, reason waitReason)

semacreate function #

go:nosplit

func semacreate(mp *m)

semacreate function #

go:nosplit

func semacreate(mp *m)

semacreate function #

go:nosplit

func semacreate(mp *m)

semacreate function #

go:nosplit

func semacreate(mp *m)

semacreate function #

go:nosplit

func semacreate(mp *m)

semacreate function #

go:nosplit

func semacreate(mp *m)

semacreate function #

go:nosplit

func semacreate(mp *m)

semacreate function #

go:nosplit

func semacreate(mp *m)

semasleep function #

go:nosplit

func semasleep(ns int64) int32

semasleep function #

go:nosplit

func semasleep(ns int64) int32

semasleep function #

go:nosplit

func semasleep(ns int64) int32

semasleep function #

go:nosplit

func semasleep(ns int64) int32

semasleep function #

go:nosplit

func semasleep(ns int64) int32

semasleep function #

go:nosplit

func semasleep(ns int64) int

semasleep function #

go:nosplit

func semasleep(ns int64) int32

semasleep function #

go:nosplit

func semasleep(ns int64) int32

semawakeup function #

go:nosplit

func semawakeup(mp *m)

semawakeup function #

go:nosplit

func semawakeup(mp *m)

semawakeup function #

go:nosplit

func semawakeup(mp *m)

semawakeup function #

go:nosplit

func semawakeup(mp *m)

semawakeup function #

go:nosplit

func semawakeup(mp *m)

semawakeup function #

go:nosplit

func semawakeup(mp *m)

semawakeup function #

go:nosplit

func semawakeup(mp *m)

semawakeup function #

go:nosplit

func semawakeup(mp *m)

semrelease function #

func semrelease(addr *uint32)

semrelease1 function #

func semrelease1(addr *uint32, handoff bool, skipframes int)

send function #

send processes a send operation on an empty channel c. The value ep sent by the sender is copied to the receiver sg. The receiver is then woken up to go on its merry way. Channel c must be empty and locked. send unlocks c with unlockf. sg must already be dequeued from c. ep must be non-nil and point to the heap or the caller's stack.

func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int)

sendDirect function #

func sendDirect(t *_type, sg *sudog, src unsafe.Pointer)

sendNote function #

Called from sighandler to send a signal back out of the signal handling thread. Reports whether the signal was sent. If not, the caller typically crashes the program.

func sendNote(s *byte) bool

set method #

set sets the pin bit of the pinState to val. If multipin is true, it sets/unsets the multipin bit instead.

func (v *pinState) set(val bool, multipin bool)

set method #

set sets bit i of pageBits.

func (b *pageBits) set(i uint)

set method #

go:nosplit

func (b *mSpanStateBox) set(s mSpanState)

set method #

set sets P id's bit.

func (p pMask) set(id int32)

set method #

go:nosplit

func (pp *puintptr) set(p *p)

set method #

func (p *memHdrPtr) set(x *memHdr)

set method #

go:nosplit

func (gp *guintptr) set(g *g)

set method #

go:nosplit

func (mp *muintptr) set(m *m)

setAll method #

setAll sets all the bits of b.

func (b *pageBits) setAll()

setBlock64 method #

setBlock64 sets the 64-bit aligned block of bits containing the i'th bit that are set in v.

func (b *pageBits) setBlock64(i uint, v uint64)

setCheckmark function #

setCheckmark throws if marking object is a checkmarks violation, and otherwise sets obj's checkmark. It returns true if obj was already checkmarked.

func setCheckmark(obj uintptr, base uintptr, off uintptr, mbits markBits) bool

setCrashFD function #

go:linkname setCrashFD

func setCrashFD(fd uintptr) uintptr

setEmpty method #

setEmpty marks that the scavenger has finished looking at ci for now to prevent the scavenger from getting stuck looking at the same chunk. setEmpty may only run concurrently with find.

func (s *scavengeIndex) setEmpty(ci chunkIdx)

setEmpty method #

setEmpty clears the hasFree flag.

func (sc *scavChunkFlags) setEmpty()

setEventErr method #

setEventErr sets the result of pd.info().eventErr() to b. We only change the error bit if seq == 0 or if seq matches pollFDSeq (issue #59545).

func (pd *pollDesc) setEventErr(b bool, seq uintptr)

setEventHandler function #

go:linkname setEventHandler syscall/js.setEventHandler

func setEventHandler(fn func() bool)

setFlushed method #

setFlushed sets the flushed flag. It returns the current cycle count and the previous value of the flushed flag.

func (c *mProfCycleHolder) setFlushed() (cycle uint32, alreadyFlushed bool)

setGCPercent function #

go:linkname setGCPercent runtime/debug.setGCPercent

func setGCPercent(in int32) (out int32)

setGCPercent method #

setGCPercent updates gcPercent. commit must be called after. Returns the old value of gcPercent. The world must be stopped, or mheap_.lock must be held.

func (c *gcControllerState) setGCPercent(in int32) int32

setGCPhase function #

go:nosplit

func setGCPhase(x uint32)

setGNoWB function #

setGNoWB performs *gp = new without a write barrier. For times when it's impractical to use a guintptr. go:nosplit go:nowritebarrier

func setGNoWB(gp **g, new *g)

setGsignalStack function #

setGsignalStack sets the gsignal stack of the current m to an alternate signal stack returned from the sigaltstack system call. It saves the old values in *old for use by restoreGsignalStack. This is used when handling a signal if non-Go code has set the alternate signal stack. go:nosplit go:nowritebarrierrec

func setGsignalStack(st *stackt, old *gsignalStack)

setMNoWB function #

setMNoWB performs *mp = new without a write barrier. For times when it's impractical to use an muintptr. go:nosplit go:nowritebarrier

func setMNoWB(mp **m, new *m)

setMarked method #

setMarked sets the marked bit in the markbits, atomically.

func (m markBits) setMarked()

setMarkedNonAtomic method #

setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.

func (m markBits) setMarkedNonAtomic()

setMaxIdleMarkWorkers method #

setMaxIdleMarkWorkers sets the maximum number of idle mark workers allowed. This method is optimistic in that it does not wait for the number of idle mark workers to reduce to max before returning; it assumes the workers will deschedule themselves.

func (c *gcControllerState) setMaxIdleMarkWorkers(max int32)

setMaxStack function #

go:linkname setMaxStack runtime/debug.setMaxStack

func setMaxStack(in int) (out int)

setMaxThreads function #

go:linkname setMaxThreads runtime/debug.setMaxThreads

func setMaxThreads(in int) (out int)

setMemoryLimit method #

setMemoryLimit updates memoryLimit. commit must be called after. Returns the old value of memoryLimit. The world must be stopped, or mheap_.lock must be held.

func (c *gcControllerState) setMemoryLimit(in int64) int64

setMemoryLimit function #

go:linkname setMemoryLimit runtime/debug.setMemoryLimit

func setMemoryLimit(in int64) (out int64)

setMultiPinned method #

func (v *pinState) setMultiPinned(val bool)

setNonEmpty method #

setNonEmpty sets the hasFree flag.

func (sc *scavChunkFlags) setNonEmpty()

setNonblock function #

go:nosplit

func setNonblock(fd int32)

setNonblock function #

go:nosplit

func setNonblock(fd int32)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setNsec method #

go:nosplit

func (ts *timespec) setNsec(ns int64)

setPanicOnFault function #

go:linkname setPanicOnFault runtime/debug.setPanicOnFault

func setPanicOnFault(new bool) (old bool)

setPinned function #

setPinned marks or unmarks a Go pointer as pinned, when the ptr is a Go pointer. It will be ignored while trying to pin a non-Go pointer, and it will panic while trying to unpin a non-Go pointer, which should not happen in normal usage.

func setPinned(ptr unsafe.Pointer, pin bool) bool

setPinned method #

func (v *pinState) setPinned(val bool)

setPinnerBits method #

func (s *mspan) setPinnerBits(p *pinnerBits)

setProcessCPUProfiler function #

func setProcessCPUProfiler(hz int32)

setProcessCPUProfiler function #

func setProcessCPUProfiler(hz int32)

setProcessCPUProfiler function #

func setProcessCPUProfiler(hz int32)

setProcessCPUProfiler function #

func setProcessCPUProfiler(hz int32)

setProcessCPUProfiler function #

func setProcessCPUProfiler(hz int32)

setProcessCPUProfiler function #

func setProcessCPUProfiler(hz int32)

setProcessCPUProfiler function #

func setProcessCPUProfiler(hz int32)

setProcessCPUProfiler function #

func setProcessCPUProfiler(hz int32)

setProcessCPUProfiler function #

func setProcessCPUProfiler(hz int32)

setProcessCPUProfiler function #

func setProcessCPUProfiler(hz int32)

setProcessCPUProfiler function #

func setProcessCPUProfiler(hz int32)

setProcessCPUProfilerTimer function #

setProcessCPUProfilerTimer is called when the profiling timer changes. It is called with prof.signalLock held. hz is the new timer, and is 0 if profiling is being disabled. Enable or disable the signal as required for -buildmode=c-archive.

func setProcessCPUProfilerTimer(hz int32)

setRange method #

setRange sets bits in the range [i, i+n).

func (b *pageBits) setRange(i uint, n uint)

setRecord method #

obj.r = r, but with no write barrier. go:nowritebarrier

func (obj *stackObject) setRecord(r *stackObjectRecord)

setSignalstackSP function #

setSignalstackSP sets the ss_sp field of a stackt. go:nosplit

func setSignalstackSP(s *stackt, sp uintptr)

setSignalstackSP function #

setSignalstackSP sets the ss_sp field of a stackt. go:nosplit

func setSignalstackSP(s *stackt, sp uintptr)

setSignalstackSP function #

setSignalstackSP sets the ss_sp field of a stackt. go:nosplit

func setSignalstackSP(s *stackt, sp uintptr)

setSignalstackSP function #

setSignalstackSP sets the ss_sp field of a stackt. go:nosplit

func setSignalstackSP(s *stackt, sp uintptr)

setSignalstackSP function #

setSignalstackSP sets the ss_sp field of a stackt. go:nosplit

func setSignalstackSP(s *stackt, sp uintptr)

setSignalstackSP function #

setSignalstackSP sets the ss_sp field of a stackt. go:nosplit

func setSignalstackSP(s *stackt, sp uintptr)

setSignalstackSP function #

setSignalstackSP sets the ss_sp field of a stackt. go:nosplit

func setSignalstackSP(s *stackt, sp uintptr)

setSignalstackSP function #

setSignalstackSP sets the ss_sp field of a stackt. go:nosplit

func setSignalstackSP(s *stackt, sp uintptr)

setSpans method #

setSpans modifies the span map so [spanOf(base), spanOf(base+npage*pageSize)) is s.

func (h *mheap) setSpans(base uintptr, npage uintptr, s *mspan)

setStatusTraced method #

setStatusTraced indicates that the resource's status was already traced, for example when a goroutine is created.

func (r *traceSchedResourceState) setStatusTraced(gen uintptr)

setThreadCPUProfiler function #

func setThreadCPUProfiler(hz int32)

setThreadCPUProfiler function #

func setThreadCPUProfiler(hz int32)

setThreadCPUProfiler function #

func setThreadCPUProfiler(hz int32)

setThreadCPUProfiler function #

func setThreadCPUProfiler(hz int32)

setThreadCPUProfiler function #

func setThreadCPUProfiler(hz int32)

setThreadCPUProfiler function #

func setThreadCPUProfiler(hz int32)

setThreadCPUProfiler function #

func setThreadCPUProfiler(hz int32)

setThreadCPUProfiler function #

func setThreadCPUProfiler(hz int32)

setThreadCPUProfiler function #

func setThreadCPUProfiler(hz int32)

setThreadCPUProfiler function #

func setThreadCPUProfiler(hz int32)

setThreadCPUProfiler function #

func setThreadCPUProfiler(hz int32)

setThreadCPUProfilerHz function #

setThreadCPUProfilerHz makes any thread-specific changes required to implement profiling at a rate of hz. No changes required on Unix systems when using setitimer.

func setThreadCPUProfilerHz(hz int32)

setTraceback function #

go:linkname setTraceback runtime/debug.SetTraceback

func setTraceback(level string)

setUserArenaChunkToFault method #

setUserArenaChunkToFault sets the address space for the user arena chunk to fault and releases any underlying memory resources. Must be in a non-preemptible state to ensure the consistency of statistics exported to MemStats.

func (s *mspan) setUserArenaChunkToFault()

set_eip method #

func (c *sigctxt) set_eip(x uint32)

set_eip method #

func (c *sigctxt) set_eip(x uint32)

set_eip method #

func (c *sigctxt) set_eip(x uint32)

set_eip method #

func (c *sigctxt) set_eip(x uint32)

set_esp method #

func (c *sigctxt) set_esp(x uint32)

set_esp method #

func (c *sigctxt) set_esp(x uint32)

set_esp method #

func (c *sigctxt) set_esp(x uint32)

set_esp method #

func (c *sigctxt) set_esp(x uint32)

set_fp method #

386 does not have frame pointer register.

func (c *context) set_fp(x uintptr)

set_fp method #

func (c *context) set_fp(x uintptr)

set_fp method #

arm does not have frame pointer register.

func (c *context) set_fp(x uintptr)

set_fp method #

func (c *context) set_fp(x uintptr)

set_gp method #

func (c *sigctxt) set_gp(x uint64)

set_gp method #

func (c *sigctxt) set_gp(x uint64)

set_gp method #

func (c *sigctxt) set_gp(x uint64)

set_ip method #

func (c *context) set_ip(x uintptr)

set_ip method #

func (c *context) set_ip(x uintptr)

set_ip method #

func (c *context) set_ip(x uintptr)

set_ip method #

func (c *context) set_ip(x uintptr)

set_lr method #

func (c *context) set_lr(x uintptr)

set_lr method #

func (c *sigctxt) set_lr(x uint64)

set_lr method #

func (c *sigctxt) set_lr(x uint32)

set_lr method #

func (c *sigctxt) set_lr(x uint64)

set_lr method #

func (c *context) set_lr(x uintptr)

set_lr method #

func (c *context) set_lr(x uintptr)

set_lr method #

func (c *sigctxt) set_lr(x uint64)

set_lr method #

func (c *sigctxt) set_lr(x uint64)

set_lr method #

func (c *context) set_lr(x uintptr)

set_lr method #

func (c *sigctxt) set_lr(x uint32)

set_lr method #

func (c *sigctxt) set_lr(x uint32)

set_lr method #

func (c *sigctxt) set_lr(x uint64)

set_lr method #

func (c *sigctxt) set_lr(x uint32)

set_pc method #

func (c *sigctxt) set_pc(x uint64)

set_pc method #

func (c *sigctxt) set_pc(x uint32)

set_pc method #

func (c *sigctxt) set_pc(x uint64)

set_pc method #

func (c *sigctxt) set_pc(x uint64)

set_pc method #

func (c *sigctxt) set_pc(x uint64)

set_pc method #

func (c *sigctxt) set_pc(x uint32)

set_pc method #

func (c *sigctxt) set_pc(x uint64)

set_pc method #

func (c *sigctxt) set_pc(x uint32)

set_pc method #

func (c *sigctxt) set_pc(x uint32)

set_pc method #

func (c *sigctxt) set_pc(x uint32)

set_pc method #

func (c *sigctxt) set_pc(x uint64)

set_pc method #

func (c *sigctxt) set_pc(x uint64)

set_pc method #

func (c *sigctxt) set_pc(x uint64)

set_pc method #

func (c *sigctxt) set_pc(x uint64)

set_pc method #

func (c *sigctxt) set_pc(x uint64)

set_pc method #

func (c *sigctxt) set_pc(x uint64)

set_pc method #

func (c *sigctxt) set_pc(x uint64)

set_pc method #

func (c *sigctxt) set_pc(x uint64)

set_pc method #

func (c *sigctxt) set_pc(x uint64)

set_pc method #

func (c *sigctxt) set_pc(x uint64)

set_r0 method #

func (c *sigctxt) set_r0(x uint64)

set_r0 method #

func (c *sigctxt) set_r0(x uint64)

set_r0 method #

func (c *sigctxt) set_r0(x uint64)

set_r0 method #

func (c *sigctxt) set_r0(x uint64)

set_r10 method #

func (c *sigctxt) set_r10(x uint32)

set_r10 method #

func (c *sigctxt) set_r10(x uint32)

set_r10 method #

func (c *sigctxt) set_r10(x uint32)

set_r10 method #

func (c *sigctxt) set_r10(x uint32)

set_r12 method #

func (c *sigctxt) set_r12(x uint64)

set_r12 method #

func (c *sigctxt) set_r12(x uint64)

set_r12 method #

func (c *sigctxt) set_r12(x uint64)

set_r13 method #

func (c *sigctxt) set_r13(x uint64)

set_r22 method #

func (c *sigctxt) set_r22(x uint64)

set_r28 method #

func (c *sigctxt) set_r28(x uint64)

set_r28 method #

func (c *sigctxt) set_r28(x uint64)

set_r28 method #

func (c *sigctxt) set_r28(x uint64)

set_r28 method #

func (c *sigctxt) set_r28(x uint64)

set_r28 method #

func (c *sigctxt) set_r28(x uint64)

set_r28 method #

func (c *sigctxt) set_r28(x uint64)

set_r28 method #

func (c *sigctxt) set_r28(x uint64)

set_r30 method #

func (c *sigctxt) set_r30(x uint64)

set_r30 method #

func (c *sigctxt) set_r30(x uint64)

set_r30 method #

func (c *sigctxt) set_r30(x uint64)

set_r30 method #

func (c *sigctxt) set_r30(x uint64)

set_r30 method #

func (c *sigctxt) set_r30(x uint32)

set_r30 method #

func (c *sigctxt) set_r30(x uint64)

set_r31 method #

func (c *sigctxt) set_r31(x uint64)

set_ra method #

func (c *sigctxt) set_ra(x uint64)

set_ra method #

func (c *sigctxt) set_ra(x uint64)

set_ra method #

func (c *sigctxt) set_ra(x uint64)

set_rip method #

func (c *sigctxt) set_rip(x uint64)

set_rip method #

func (c *sigctxt) set_rip(x uint64)

set_rip method #

func (c *sigctxt) set_rip(x uint64)

set_rip method #

func (c *sigctxt) set_rip(x uint64)

set_rip method #

func (c *sigctxt) set_rip(x uint64)

set_rip method #

func (c *sigctxt) set_rip(x uint64)

set_rip method #

func (c *sigctxt) set_rip(x uint64)

set_rsp method #

func (c *sigctxt) set_rsp(x uint64)

set_rsp method #

func (c *sigctxt) set_rsp(x uint64)

set_rsp method #

func (c *sigctxt) set_rsp(x uint64)

set_rsp method #

func (c *sigctxt) set_rsp(x uint64)

set_rsp method #

func (c *sigctxt) set_rsp(x uint64)

set_rsp method #

func (c *sigctxt) set_rsp(x uint64)

set_rsp method #

func (c *sigctxt) set_rsp(x uint64)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint32)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint64)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint64)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint64)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint64)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint64)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint64)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint64)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint64)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint32)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint32)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint64)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint64)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint32)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint32)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint32)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint64)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint64)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint64)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint32)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint64)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint32)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint64)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint64)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint64)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint64)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint64)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint64)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint64)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint32)

set_sigaddr method #

func (c *sigctxt) set_sigaddr(x uint64)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint32)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint32)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint64)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint32)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint32)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint64)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint32)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint64)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint32)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint64)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint64)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint32)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint64)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint64)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint32)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint64)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint32)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint64)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint32)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint64)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint32)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint32)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint32)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint32)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint32)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint64)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint32)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint32)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint64)

set_sigcode method #

func (c *sigctxt) set_sigcode(x uint32)

set_sp method #

func (c *sigctxt) set_sp(x uint64)

set_sp method #

func (c *sigctxt) set_sp(x uint32)

set_sp method #

func (c *sigctxt) set_sp(x uint64)

set_sp method #

func (c *context) set_sp(x uintptr)

set_sp method #

func (c *sigctxt) set_sp(x uint32)

set_sp method #

func (c *sigctxt) set_sp(x uint64)

set_sp method #

func (c *context) set_sp(x uintptr)

set_sp method #

func (c *sigctxt) set_sp(x uint64)

set_sp method #

func (c *sigctxt) set_sp(x uint64)

set_sp method #

func (c *sigctxt) set_sp(x uint64)

set_sp method #

func (c *sigctxt) set_sp(x uint32)

set_sp method #

func (c *context) set_sp(x uintptr)

set_sp method #

func (c *sigctxt) set_sp(x uint64)

set_sp method #

func (c *sigctxt) set_sp(x uint64)

set_sp method #

func (c *sigctxt) set_sp(x uint64)

set_sp method #

func (c *sigctxt) set_sp(x uint64)

set_sp method #

func (c *sigctxt) set_sp(x uint64)

set_sp method #

func (c *sigctxt) set_sp(x uint64)

set_sp method #

func (c *sigctxt) set_sp(x uint32)

set_sp method #

func (c *sigctxt) set_sp(x uint32)

set_sp method #

func (c *sigctxt) set_sp(x uint64)

set_sp method #

func (c *sigctxt) set_sp(x uint64)

set_sp method #

func (c *sigctxt) set_sp(x uint64)

set_sp method #

func (c *context) set_sp(x uintptr)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

go:nosplit

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

set_usec method #

func (tv *timeval) set_usec(x int32)

setcpuprofilerate function #

setcpuprofilerate sets the CPU profiling rate to hz times per second. If hz <= 0, setcpuprofilerate turns off CPU profiling.

func setcpuprofilerate(hz int32)

setenv_c function #

Update the C environment if cgo is loaded.

func setenv_c(k string, v string)

setfpmasks function #

func setfpmasks()

setg function #

func setg(gg *g)

setitimer function #

go:noescape

func setitimer(mode int32, new *itimerval, old *itimerval)

setitimer function #

go:nosplit go:cgo_unsafe_args

func setitimer(mode int32, new *itimerval, old *itimerval)

setitimer function #

go:noescape

func setitimer(mode int32, new *itimerval, old *itimerval)

setitimer function #

func setitimer(which int32, value *itimerval, ovalue *itimerval)

setitimer function #

go:noescape

func setitimer(mode int32, new *itimerval, old *itimerval)

setitimer function #

go:noescape

func setitimer(mode int32, new *itimerval, old *itimerval)

setitimer function #

go:nosplit go:cgo_unsafe_args

func setitimer(mode int32, new *itimerval, old *itimerval)

setitimer function #

go:noescape

func setitimer(mode int32, new *itimerval, old *itimerval)

setitimer function #

go:nosplit

func setitimer(mode int32, new *itimerval, old *itimerval)

setitimer_trampoline function #

func setitimer_trampoline()

setitimer_trampoline function #

func setitimer_trampoline()

setldt function #

Called from assembly only; declared for go vet.

func setldt(slot uintptr, base unsafe.Pointer, size uintptr)

setlr method #

func (c *sigctxt) setlr(x uintptr)

setlr method #

func (c *sigctxt) setlr(x uintptr)

setlr method #

func (c *sigctxt) setlr(x uintptr)

setoverflow method #

func (b *bmap) setoverflow(t *maptype, ovf *bmap)

setpc method #

func (c *sigctxt) setpc(x uintptr)

setpc method #

func (c *sigctxt) setpc(x uintptr)

setpc method #

func (c *sigctxt) setpc(x uintptr)

setprofilebucket function #

Set the heap profile bucket associated with addr to b.

func setprofilebucket(p unsafe.Pointer, b *bucket)

setsig function #

go:nosplit go:nowritebarrierrec

func setsig(i uint32, fn uintptr)

setsig function #

go:nosplit go:nowritebarrierrec

func setsig(i uint32, fn uintptr)

setsig function #

go:nosplit go:nowritebarrierrec

func setsig(i uint32, fn uintptr)

setsig function #

go:nosplit go:nowritebarrierrec

func setsig(i uint32, fn uintptr)

setsig function #

go:nosplit go:nowritebarrierrec

func setsig(i uint32, fn uintptr)

setsig function #

go:nosplit go:nowritebarrierrec

func setsig(i uint32, fn uintptr)

setsig function #

go:nosplit go:nowritebarrierrec

func setsig(i uint32, fn uintptr)

setsig function #

go:nosplit go:nowritebarrierrec

func setsig(i uint32, fn uintptr)

setsig function #

go:nosplit go:nowritebarrierrec

func setsig(i uint32, fn uintptr)

setsigpc method #

func (c *sigctxt) setsigpc(x uint64)

setsigpc method #

func (c *sigctxt) setsigpc(x uint64)

setsigpc method #

func (c *sigctxt) setsigpc(x uint64)

setsigpc method #

func (c *sigctxt) setsigpc(x uint64)

setsigsegv function #

setsigsegv is used on darwin/arm64 to fake a segmentation fault. This is exported via linkname to assembly in runtime/cgo. go:nosplit go:linkname setsigsegv

func setsigsegv(pc uintptr)

setsigstack function #

go:nosplit go:nowritebarrierrec

func setsigstack(i uint32)

setsigstack function #

go:nosplit go:nowritebarrierrec

func setsigstack(i uint32)

setsigstack function #

go:nosplit go:nowritebarrierrec

func setsigstack(i uint32)

setsigstack function #

go:nosplit go:nowritebarrierrec

func setsigstack(i uint32)

setsigstack function #

go:nosplit go:nowritebarrierrec

func setsigstack(i uint32)

setsigstack function #

go:nosplit go:nowritebarrierrec

func setsigstack(i uint32)

setsigstack function #

go:nosplit go:nowritebarrierrec

func setsigstack(i uint32)

setsigstack function #

go:nosplit go:nowritebarrierrec

func setsigstack(i uint32)

setsp method #

func (c *sigctxt) setsp(x uintptr)

setsp method #

func (c *sigctxt) setsp(x uintptr)

setsp method #

func (c *sigctxt) setsp(x uintptr)

settls function #

Called from assembly only; declared for go vet.

func settls()

shade function #

Shade the object if it isn't already. The object is not nil and known to be in the heap. Preemption must be disabled. go:nowritebarrier

func shade(b uintptr)

shouldPushSigpanic function #

shouldPushSigpanic reports whether pc should be used as sigpanic's return PC (pushing a frame for the call). Otherwise, it should be left alone so that LR is used as sigpanic's return PC, effectively replacing the top-most frame with sigpanic. This is used by preparePanic.

func shouldPushSigpanic(gp *g, pc uintptr, lr uintptr) bool

shouldScavenge method #

shouldScavenge returns true if the corresponding chunk should be interrogated by the scavenger.

func (sc scavChunkData) shouldScavenge(currGen uint32, force bool) bool

showframe function #

showframe reports whether the frame with the given characteristics should be printed during a traceback.

func showframe(sf srcFunc, gp *g, firstFrame bool, calleeID abi.FuncID) bool

showfuncinfo function #

showfuncinfo reports whether a function with the given characteristics should be printed during a traceback.

func showfuncinfo(sf srcFunc, firstFrame bool, calleeID abi.FuncID) bool

shrinkstack function #

Maybe shrink the stack being used by gp. gp must be stopped and we must own its stack. It may be in _Grunning, but only if this is our own user G.

func shrinkstack(gp *g)

siftDown method #

siftDown puts the timer at position i in the right place in the heap by moving it down toward the bottom of the heap.

func (ts *timers) siftDown(i int)

siftUp method #

siftUp puts the timer at position i in the right place in the heap by moving it up toward the top of the heap.

func (ts *timers) siftUp(i int)

sigFetchG function #

sigFetchG fetches the value of G safely when running in a signal handler. On some architectures, the g value may be clobbered when running in a VDSO. See issue #32912. go:nosplit

func sigFetchG(c *sigctxt) *g

sigFetchG function #

func sigFetchG() *g

sigFetchGSafe function #

sigFetchGSafe is like getg() but without panicking when TLS is not set. Only implemented on windows/386, which is the only arch that loads TLS when calling getg(). Others use a dedicated register.

func sigFetchGSafe() *g

sigFromSeccomp method #

sigFromSeccomp reports whether the signal was sent from seccomp. go:nosplit

func (c *sigctxt) sigFromSeccomp() bool

sigFromSeccomp method #

sigFromSeccomp reports whether the signal was sent from seccomp. go:nosplit

func (c *sigctxt) sigFromSeccomp() bool

sigFromUser method #

sigFromUser reports whether the signal was sent because of a call to kill. go:nosplit

func (c *sigctxt) sigFromUser() bool

sigFromUser method #

sigFromUser reports whether the signal was sent because of a call to kill or tgkill. go:nosplit

func (c *sigctxt) sigFromUser() bool

sigInitIgnored function #

sigInitIgnored marks the signal as already ignored. This is called at program start by initsig. In a shared library initsig is called by libpreinit, so the runtime may not be initialized yet. go:nosplit

func sigInitIgnored(s uint32)

sigInstallGoHandler function #

go:nosplit go:nowritebarrierrec

func sigInstallGoHandler(sig uint32) bool

sigNotOnStack function #

This is called if we receive a signal when there is a signal stack but we are not on it. This can only happen if non-Go code called sigaction without setting the SS_ONSTACK flag.

func sigNotOnStack(sig uint32, sp uintptr, mp *m)

sigNoteSetup function #

sigNoteSetup initializes a single, there-can-only-be-one, async-signal-safe note. The current implementation of notes on Darwin is not async-signal-safe, because the functions pthread_mutex_lock, pthread_cond_signal, and pthread_mutex_unlock, called by semawakeup, are not async-signal-safe. There is only one case where we need to wake up a note from a signal handler: the sigsend function. The signal handler code does not require all the features of notes: it does not need to do a timed wait. This is a separate implementation of notes, based on a pipe, that does not support timed waits but is async-signal-safe.

func sigNoteSetup(*note)

sigNoteSetup function #

func sigNoteSetup(*note)

sigNoteSleep function #

sigNoteSleep waits for a note created by sigNoteSetup to be woken.

func sigNoteSleep(*note)

sigNoteSleep function #

func sigNoteSleep(*note)

sigNoteWakeup function #

sigNoteWakeup wakes up a thread sleeping on a note created by sigNoteSetup.

func sigNoteWakeup(*note)

sigNoteWakeup function #

func sigNoteWakeup(*note)

sigaction function #

go:nosplit go:nowritebarrierrec

func sigaction(sig uint32, new *sigactiont, old *sigactiont)

sigaction function #

go:noescape

func sigaction(sig uint32, new *sigactiont, old *sigactiont)

sigaction function #

go:nosplit go:cgo_unsafe_args

func sigaction(sig uint32, new *usigactiont, old *usigactiont)

sigaction function #

go:noescape

func sigaction(sig uint32, new *sigactiont, old *sigactiont)

sigaction function #

go:noescape

func sigaction(sig uint32, new *sigactiont, old *sigactiont)

sigaction function #

go:nosplit go:nowritebarrierrec

func sigaction(sig uint32, act *sigactiont, oact *sigactiont)

sigaction function #

go:nosplit

func sigaction(sig uintptr, new *sigactiont, old *sigactiont)

sigaction function #

go:nosplit go:nowritebarrierrec

func sigaction(sig uint32, new *sigactiont, old *sigactiont)

sigaction function #

go:nosplit go:cgo_unsafe_args

func sigaction(sig uint32, new *sigactiont, old *sigactiont)

sigaction1 function #

func sigaction1(sig uintptr, new uintptr, old uintptr)

sigaction_trampoline function #

func sigaction_trampoline()

sigaction_trampoline function #

func sigaction_trampoline()

sigaddr method #

func (c *sigctxt) sigaddr() uint64

sigaddr method #

func (c *sigctxt) sigaddr() uint64

sigaddr method #

func (c *sigctxt) sigaddr() uint64

sigaddr method #

func (c *sigctxt) sigaddr() uint32

sigaddr method #

func (c *sigctxt) sigaddr() uint64

sigaddr method #

func (c *sigctxt) sigaddr() uint64

sigaddr method #

func (c *sigctxt) sigaddr() uint64

sigaddr method #

func (c *sigctxt) sigaddr() uint64

sigaddr method #

func (c *sigctxt) sigaddr() uint64

sigaddr method #

func (c *sigctxt) sigaddr() uint32

sigaddr method #

func (c *sigctxt) sigaddr() uint32

sigaddr method #

func (c *sigctxt) sigaddr() uint64

sigaddr method #

func (c *sigctxt) sigaddr() uint64

sigaddr method #

func (c *sigctxt) sigaddr() uint32

sigaddr method #

func (c *sigctxt) sigaddr() uint64

sigaddr method #

func (c *sigctxt) sigaddr() uint64

sigaddr method #

func (c *sigctxt) sigaddr() uint64

sigaddr method #

func (c *sigctxt) sigaddr() uint64

sigaddr method #

func (c *sigctxt) sigaddr() uint64

sigaddr method #

func (c *sigctxt) sigaddr() uint64

sigaddr method #

func (c *sigctxt) sigaddr() uint32

sigaddr method #

func (c *sigctxt) sigaddr() uint64

sigaddr method #

func (c *sigctxt) sigaddr() uint32

sigaddr method #

func (c *sigctxt) sigaddr() uint64

sigaddr method #

func (c *sigctxt) sigaddr() uint64

sigaddr method #

func (c *sigctxt) sigaddr() uint64

sigaddr method #

func (c *sigctxt) sigaddr() uint32

sigaddr method #

func (c *sigctxt) sigaddr() uint32

sigaddr method #

func (c *sigctxt) sigaddr() uint64

sigaddr method #

func (c *sigctxt) sigaddr() uint64

sigaddr method #

func (c *sigctxt) sigaddr() uint32

sigaddset function #

go:nosplit go:nowritebarrierrec

func sigaddset(mask *sigset, i int)

sigaddset function #

go:nosplit go:nowritebarrierrec

func sigaddset(mask *sigset, i int)

sigaddset function #

go:nosplit go:nowritebarrierrec

func sigaddset(mask *sigset, i int)

sigaddset function #

go:nosplit go:nowritebarrierrec

func sigaddset(mask *sigset, i int)

sigaddset function #

go:nosplit go:nowritebarrierrec

func sigaddset(mask *sigset, i int)

sigaddset function #

go:nosplit go:nowritebarrierrec

func sigaddset(mask *sigset, i int)

sigaddset function #

go:nosplit go:nowritebarrierrec

func sigaddset(mask *sigset, i int)

sigaddset function #

go:nosplit go:nowritebarrierrec

func sigaddset(mask *sigset, i int)

sigaddset function #

go:nosplit go:nowritebarrierrec

func sigaddset(mask *sigset, i int)

sigaddset function #

go:nosplit go:nowritebarrierrec

func sigaddset(mask *sigset, i int)

sigaddset function #

go:nosplit go:nowritebarrierrec

func sigaddset(mask *sigset, i int)

sigaltstack function #

go:nosplit

func sigaltstack(new *stackt, old *stackt)

sigaltstack function #

go:nosplit go:cgo_unsafe_args

func sigaltstack(new *stackt, old *stackt)

sigaltstack function #

go:noescape

func sigaltstack(new *stackt, old *stackt)

sigaltstack function #

go:noescape

func sigaltstack(new *stackt, old *stackt)

sigaltstack function #

go:noescape

func sigaltstack(new *stackt, old *stackt)

sigaltstack function #

go:nosplit go:cgo_unsafe_args

func sigaltstack(new *stackt, old *stackt)

sigaltstack function #

go:noescape

func sigaltstack(new *stackt, old *stackt)

sigaltstack function #

go:nosplit go:nowritebarrierrec

func sigaltstack(ss *stackt, oss *stackt)

sigaltstack function #

go:noescape

func sigaltstack(new *stackt, old *stackt)

sigaltstack_trampoline function #

func sigaltstack_trampoline()

sigaltstack_trampoline function #

func sigaltstack_trampoline()

sigblock function #

func sigblock(exiting bool)

sigblock function #

go:nosplit

func sigblock(exiting bool)

sigblock function #

go:nosplit

func sigblock(exiting bool)

sigblock function #

sigblock blocks signals in the current thread's signal mask. This is used to block signals while setting up and tearing down g when a non-Go thread calls a Go function. When a thread is exiting we use the sigsetAllExiting value, otherwise the OS specific definition of sigset_all is used. This is nosplit and nowritebarrierrec because it is called by needm which may be called on a non-Go thread with no g available. go:nosplit go:nowritebarrierrec

func sigblock(exiting bool)

sigcode method #

func (c *sigctxt) sigcode() uint64

sigcode method #

func (c *sigctxt) sigcode() uint32

sigcode method #

func (c *sigctxt) sigcode() uint32

sigcode method #

func (c *sigctxt) sigcode() uint32

sigcode method #

func (c *sigctxt) sigcode() uint32

sigcode method #

func (c *sigctxt) sigcode() uint32

sigcode method #

func (c *sigctxt) sigcode() uint64

sigcode method #

func (c *sigctxt) sigcode() uint64

sigcode method #

func (c *sigctxt) sigcode() uint32

sigcode method #

func (c *sigctxt) sigcode() uint32

sigcode method #

func (c *sigctxt) sigcode() uint32

sigcode method #

func (c *sigctxt) sigcode() uint32

sigcode method #

func (c *sigctxt) sigcode() uint64

sigcode method #

func (c *sigctxt) sigcode() uint32

sigcode method #

func (c *sigctxt) sigcode() uint64

sigcode method #

func (c *sigctxt) sigcode() uint32

sigcode method #

func (c *sigctxt) sigcode() uint32

sigcode method #

func (c *sigctxt) sigcode() uint64

sigcode method #

func (c *sigctxt) sigcode() uint32

sigcode method #

func (c *sigctxt) sigcode() uint64

sigcode method #

func (c *sigctxt) sigcode() uint64

sigcode method #

func (c *sigctxt) sigcode() uint32

sigcode method #

func (c *sigctxt) sigcode() uint64

sigcode method #

func (c *sigctxt) sigcode() uint64

sigcode method #

func (c *sigctxt) sigcode() uint64

sigcode method #

func (c *sigctxt) sigcode() uint32

sigcode method #

func (c *sigctxt) sigcode() uint64

sigcode method #

func (c *sigctxt) sigcode() uint32

sigcode method #

func (c *sigctxt) sigcode() uint32

sigcode method #

func (c *sigctxt) sigcode() uint64

sigcode method #

func (c *sigctxt) sigcode() uint64

sigdelset function #

func sigdelset(mask *sigset, i int)

sigdelset function #

func sigdelset(mask *sigset, i int)

sigdelset function #

func sigdelset(mask *sigset, i int)

sigdelset function #

func sigdelset(mask *sigset, i int)

sigdelset function #

func sigdelset(mask *sigset, i int)

sigdelset function #

func sigdelset(mask *sigset, i int)

sigdelset function #

func sigdelset(mask *sigset, i int)

sigdelset function #

func sigdelset(mask *sigset, i int)

sigdelset function #

func sigdelset(mask *sigset, i int)

sigdelset function #

func sigdelset(mask *sigset, i int)

sigdelset function #

func sigdelset(mask *sigset, i int)

sigdisable function #

sigdisable disables the Go signal handler for the signal sig. It is only called while holding the os/signal.handlers lock, via os/signal.disableSignal and signal_disable.

func sigdisable(sig uint32)

sigdisable function #

func sigdisable(uint32)

sigdisable function #

func sigdisable(sig uint32)

sigdisable function #

func sigdisable(sig uint32)

sigenable function #

func sigenable(uint32)

sigenable function #

sigenable enables the Go signal handler to catch the signal sig. It is only called while holding the os/signal.handlers lock, via os/signal.enableSignal and signal_enable.

func sigenable(sig uint32)

sigenable function #

func sigenable(sig uint32)

sigenable function #

func sigenable(sig uint32)

sigfillset function #

go:nosplit

func sigfillset(mask *[4]uint32)

sigfillset function #

go:nosplit

func sigfillset(mask *uint64)

sigfillset function #

go:nosplit

func sigfillset(mask *[2]uint64)

sigfillset function #

go:nosplit

func sigfillset(mask *uint64)

sigfwd function #

go:noescape

func sigfwd(fn uintptr, sig uint32, info *siginfo, ctx unsafe.Pointer)

sigfwdgo function #

Determines if the signal should be handled by Go and if not, forwards the signal to the handler that was installed before Go's. Returns whether the signal was forwarded. This is called by the signal handler, and the world may be stopped. go:nosplit go:nowritebarrierrec

func sigfwdgo(sig uint32, info *siginfo, ctx unsafe.Pointer) bool

sighandler function #

sighandler is invoked when a signal occurs. The global g will be set to a gsignal goroutine and we will be running on the alternate signal stack. The parameter gp will be the value of the global g when the signal occurred. The sig, info, and ctxt parameters are from the system signal handler: they are the parameters passed when the SA is passed to the sigaction system call. The garbage collector may have stopped the world, so write barriers are not allowed. go:nowritebarrierrec

func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g)

sighandler function #

May run during STW, so write barriers are not allowed. go:nowritebarrierrec

func sighandler(_ureg *ureg, note *byte, gp *g) int

sigignore function #

func sigignore(sig uint32)

sigignore function #

func sigignore(sig uint32)

sigignore function #

func sigignore(uint32)

sigignore function #

sigignore ignores the signal sig. It is only called while holding the os/signal.handlers lock, via os/signal.ignoreSignal and signal_ignore.

func sigignore(sig uint32)

siglr method #

func (c *sigctxt) siglr() uintptr

siglr method #

func (c *sigctxt) siglr() uintptr

siglr method #

func (c *sigctxt) siglr() uintptr

siglr method #

func (c *sigctxt) siglr() uintptr

siglr method #

func (c *sigctxt) siglr() uintptr

siglr method #

func (c *sigctxt) siglr() uintptr

siglr method #

func (c *sigctxt) siglr() uintptr

siglr method #

func (c *sigctxt) siglr() uintptr

siglr method #

func (c *sigctxt) siglr() uintptr

siglr method #

func (c *sigctxt) siglr() uintptr

signalDuringFork function #

signalDuringFork is called if we receive a signal while doing a fork. We do not want signals at that time, as a signal sent to the process group may be delivered to the child process, causing confusion. This should never be called, because we block signals across the fork; this function is just a safety check. See issue 18600 for background.

func signalDuringFork(sig uint32)

signalM function #

func signalM(mp *m, sig int)

signalM function #

signalM sends a signal to mp.

func signalM(mp *m, sig int)

signalM function #

func signalM(mp *m, sig int)

signalM function #

func signalM(mp *m, sig int)

signalM function #

func signalM(mp *m, sig int)

signalM function #

go:nosplit

func signalM(mp *m, sig int)

signalM function #

func signalM(mp *m, sig int)

signalM function #

func signalM(mp *m, sig int)

signalWaitUntilIdle function #

signalWaitUntilIdle waits until the signal delivery mechanism is idle. This is used to ensure that we do not drop a signal notification due to a race between disabling a signal and receiving a signal. This assumes that signal delivery has already been disabled for the signal(s) in question, and here we are just waiting to make sure that all the signals have been delivered to the user channels by the os/signal package. go:linkname signalWaitUntilIdle os/signal.signalWaitUntilIdle

func signalWaitUntilIdle()

signalWaitUntilIdle function #

signalWaitUntilIdle waits until the signal delivery mechanism is idle. This is used to ensure that we do not drop a signal notification due to a race between disabling a signal and receiving a signal. This assumes that signal delivery has already been disabled for the signal(s) in question, and here we are just waiting to make sure that all the signals have been delivered to the user channels by the os/signal package. go:linkname signalWaitUntilIdle os/signal.signalWaitUntilIdle

func signalWaitUntilIdle()

signal_disable function #

Must only be called from a single goroutine at a time. go:linkname signal_disable os/signal.signal_disable

func signal_disable(s uint32)

signal_disable function #

Must only be called from a single goroutine at a time. go:linkname signal_disable os/signal.signal_disable

func signal_disable(s uint32)

signal_enable function #

Must only be called from a single goroutine at a time. go:linkname signal_enable os/signal.signal_enable

func signal_enable(s uint32)

signal_enable function #

Must only be called from a single goroutine at a time. go:linkname signal_enable os/signal.signal_enable

func signal_enable(s uint32)

signal_ignore function #

Must only be called from a single goroutine at a time. go:linkname signal_ignore os/signal.signal_ignore

func signal_ignore(s uint32)

signal_ignore function #

Must only be called from a single goroutine at a time. go:linkname signal_ignore os/signal.signal_ignore

func signal_ignore(s uint32)

signal_ignored function #

go:linkname signal_ignored os/signal.signal_ignored

func signal_ignored(s uint32) bool

signal_ignored function #

Checked by signal handlers. go:linkname signal_ignored os/signal.signal_ignored

func signal_ignored(s uint32) bool

signal_recv function #

Called to receive the next queued signal. Must only be called from a single goroutine at a time. go:linkname signal_recv os/signal.signal_recv

func signal_recv() uint32

signal_recv function #

Called to receive the next queued signal. Must only be called from a single goroutine at a time. go:linkname signal_recv os/signal.signal_recv

func signal_recv() string

signalstack function #

signalstack sets the current thread's alternate signal stack to s. go:nosplit

func signalstack(s *stack)

signame function #

func signame(sig uint32) string

signame function #

func signame(sig uint32) string

signame function #

func signame(sig uint32) string

signame function #

func signame(sig uint32) string

sigpanic function #

sigpanic turns a synchronous signal into a run-time panic. If the signal handler sees a synchronous panic, it arranges the stack to look like the function where the signal occurred called sigpanic, sets the signal's PC value to sigpanic, and returns from the signal handler. The effect is that the program will act as though the function that got the signal simply called sigpanic instead. This must NOT be nosplit because the linker doesn't know where sigpanic calls can be injected. The signal handler must not inject a call to sigpanic if getg().throwsplit, since sigpanic may need to grow the stack. This is exported via linkname to assembly in runtime/cgo. go:linkname sigpanic

func sigpanic()

sigpanic function #

func sigpanic()

sigpanic function #

func sigpanic()

sigpanic function #

func sigpanic()

sigpanic0 function #

Injected by the signal handler for panicking signals. Initializes any registers that have fixed meaning at calls but are scratch in bodies and calls sigpanic. On many platforms it just jumps to sigpanic.

func sigpanic0()

sigpanictramp function #

func sigpanictramp()

sigpanictramp function #

func sigpanictramp()

sigpanictramp function #

func sigpanictramp()

sigpc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) sigpc() uintptr

sigpc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) sigpc() uintptr

sigpc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) sigpc() uintptr

sigpc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) sigpc() uintptr

sigpc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) sigpc() uintptr

sigpc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) sigpc() uintptr

sigpc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) sigpc() uintptr

sigpc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) sigpc() uintptr

sigpc method #

func (c *sigctxt) sigpc() uintptr

sigpc method #

go:nosplit go:nowritebarrierrec

func (c *sigctxt) sigpc() uintptr

sigpipe function #

func sigpipe()

sigprocmask function #

go:nosplit go:nowritebarrierrec

func sigprocmask(how int32, set *sigset, oset *sigset)

sigprocmask function #

go:nosplit go:cgo_unsafe_args

func sigprocmask(how uint32, new *sigset, old *sigset)

sigprocmask function #

go:nosplit

func sigprocmask(how int32, new *sigset, old *sigset)

sigprocmask function #

go:nosplit go:cgo_unsafe_args

func sigprocmask(how uint32, new *sigset, old *sigset)

sigprocmask function #

go:noescape

func sigprocmask(how int32, new *sigset, old *sigset)

sigprocmask function #

go:noescape

func sigprocmask(how int32, new *sigset, old *sigset)

sigprocmask function #

go:nosplit go:nowritebarrierrec

func sigprocmask(how int32, new *sigset, old *sigset)

sigprocmask function #

go:nosplit go:nowritebarrierrec

func sigprocmask(how int32, new *sigset, old *sigset)

sigprocmask function #

go:noescape

func sigprocmask(how int32, new *sigset, old *sigset)

sigprocmask1 function #

In a multi-threaded program, sigprocmask must not be called. It is replaced by sigthreadmask.

func sigprocmask1(how uintptr, new uintptr, old uintptr)

sigprocmask_trampoline function #

func sigprocmask_trampoline()

sigprocmask_trampoline function #

func sigprocmask_trampoline()

sigprof function #

Called if we receive a SIGPROF signal. Called by the signal handler, may run during STW. go:nowritebarrierrec

func sigprof(pc uintptr, sp uintptr, lr uintptr, gp *g, mp *m)

sigprofNonGo function #

sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread, and the signal handler collected a stack trace in sigprofCallers. When this is called, sigprofCallersUse will be non-zero. g is nil, and what we can do is very limited. It is called from the signal handling functions written in assembly code that are active for cgo programs, cgoSigtramp and sigprofNonGoWrapper, which have not verified that the SIGPROF delivery corresponds to the best available profiling source for this thread. go:nosplit go:nowritebarrierrec

func sigprofNonGo(sig uint32, info *siginfo, ctx unsafe.Pointer)

sigprofNonGoPC function #

sigprofNonGoPC is called when a profiling signal arrived on a non-Go thread and we have a single PC value, not a stack trace. g is nil, and what we can do is very limited. go:nosplit go:nowritebarrierrec

func sigprofNonGoPC(pc uintptr)

sigresume function #

func sigresume()

sigreturn__sigaction function #

func sigreturn__sigaction()

sigsave function #

sigsave saves the current thread's signal mask into *p. This is used to preserve the non-Go signal mask when a non-Go thread calls a Go function. This is nosplit and nowritebarrierrec because it is called by needm which may be called on a non-Go thread with no g available. go:nosplit go:nowritebarrierrec

func sigsave(p *sigset)

sigsave function #

go:nosplit

func sigsave(p *sigset)

sigsave function #

func sigsave(p *sigset)

sigsave function #

go:nosplit

func sigsave(p *sigset)

sigsend function #

sigsend delivers a signal from sighandler to the internal signal delivery queue. It reports whether the signal was sent. If not, the caller typically crashes the program. It runs from the signal handler, so it's limited in what it can do.

func sigsend(s uint32) bool

sigsp method #

func (c *sigctxt) sigsp() uintptr

sigsp method #

func (c *sigctxt) sigsp() uintptr

sigsp method #

func (c *sigctxt) sigsp() uintptr

sigsp method #

func (c *sigctxt) sigsp() uintptr

sigsp method #

func (c *sigctxt) sigsp() uintptr

sigsp method #

func (c *sigctxt) sigsp() uintptr

sigsp method #

func (c *sigctxt) sigsp() uintptr

sigsp method #

func (c *sigctxt) sigsp() uintptr

sigsp method #

func (c *sigctxt) sigsp() uintptr

sigsp method #

func (c *sigctxt) sigsp() uintptr

sigtramp function #

func sigtramp()

sigtramp function #

func sigtramp()

sigtramp function #

func sigtramp()

sigtramp function #

go:noescape

func sigtramp(ureg unsafe.Pointer, note unsafe.Pointer)

sigtramp function #

sigtramp is the callback from libc when a signal is received. It is called with the C calling convention.

func sigtramp()

sigtramp function #

func sigtramp()

sigtramp function #

func sigtramp()

sigtramp function #

func sigtramp()

sigtrampgo function #

sigtrampgo is called from the signal handler function, sigtramp, written in assembly code. This is called by the signal handler, and the world may be stopped. It must be nosplit because getg() is still the G that was running (if any) when the signal was delivered, but it's (usually) called on the gsignal stack. Until this switches the G to gsignal, the stack bounds check won't work. go:nosplit go:nowritebarrierrec

func sigtrampgo(sig uint32, info *siginfo, ctx unsafe.Pointer)

sigtrampgo function #

sigtrampgo is called from the exception handler function, sigtramp, written in assembly code. Return EXCEPTION_CONTINUE_EXECUTION if the exception is handled, else return EXCEPTION_CONTINUE_SEARCH. It is nosplit for the same reason as exceptionhandler. go:nosplit

func sigtrampgo(ep *exceptionpointers, kind int) int32

size method #

size returns the size of the range represented in bytes.

func (a addrRange) size() uintptr

sizeclass method #

go:nosplit

func (sc spanClass) sizeclass() int8

skip method #

go:nosplit

func (r *debugLogReader) skip() uint64

sleep method #

sleep puts the scavenger to sleep based on the amount of time that it worked in nanoseconds. Note that this function should only be called by the scavenger. The scavenger may be woken up earlier by a pacing change, and it may not go to sleep at all if there's a pending pacing change.

func (s *scavengerState) sleep(worked float64)

sleep method #

sleep sleeps for the provided duration in nanoseconds or until another goroutine calls wake. Must not be called by more than one goroutine at a time and must not be called concurrently with close.

func (s *wakeableSleep) sleep(ns int64)

sleep function #

func sleep(ms int32) int32

slice method #

slice allocates a new slice backing store. slice must be a pointer to a slice (i.e. *[]T), because userArenaSlice will update the slice directly. cap determines the capacity of the slice backing store and must be non-negative. This operation is not safe to call concurrently with other operations on the same arena.

func (a *userArena) slice(sl any, cap int)

slicebytetostring function #

slicebytetostring converts a byte slice to a string. It is inserted by the compiler into generated code. ptr is a pointer to the first element of the slice; n is the length of the slice. Buf is a fixed-size buffer for the result, it is not nil if the result does not escape.

func slicebytetostring(buf *tmpBuf, ptr *byte, n int) string

slicebytetostringtmp function #

slicebytetostringtmp returns a "string" referring to the actual []byte bytes. Callers need to ensure that the returned string will not be used after the calling goroutine modifies the original slice or synchronizes with another goroutine. The function is only called when instrumenting and otherwise intrinsified by the compiler. Some internal compiler optimizations use this function. - Used for m[T1{... Tn{..., string(k), ...} ...}] and m[string(k)] where k is []byte, T1 to Tn is a nesting of struct and array literals. - Used for "<"+string(b)+">" concatenation where b is []byte. - Used for string(b)=="foo" comparison where b is []byte.

func slicebytetostringtmp(ptr *byte, n int) string

slicecopy function #

slicecopy is used to copy from a string or slice of pointerless elements into a slice.

func slicecopy(toPtr unsafe.Pointer, toLen int, fromPtr unsafe.Pointer, fromLen int, width uintptr) int

slicerunetostring function #

func slicerunetostring(buf *tmpBuf, a []rune) string

slowdodiv function #

go:nosplit

func slowdodiv(n uint64, d uint64) (q uint64, r uint64)

socket function #

func socket(domain int32, typ int32, prot int32) int32

sortkey method #

func (c *hchan) sortkey() uintptr

sp method #

func (c *sigctxt) sp() uint32

sp method #

func (c *sigctxt) sp() uint64

sp method #

func (c *sigctxt) sp() uint32

sp method #

func (c *sigctxt) sp() uint64

sp method #

func (c *sigctxt) sp() uint64

sp method #

func (c *sigctxt) sp() uint32

sp method #

func (c *sigctxt) sp() uint64

sp method #

func (c *sigctxt) sp() uint64

sp method #

func (c *sigctxt) sp() uint64

sp method #

func (c *sigctxt) sp() uint64

sp method #

func (c *sigctxt) sp() uint64

sp method #

func (c *sigctxt) sp() uint64

sp method #

func (c *sigctxt) sp() uint64

sp method #

func (c *context) sp() uintptr

sp method #

func (c *sigctxt) sp() uint64

sp method #

func (c *sigctxt) sp() uintptr

sp method #

func (c *context) sp() uintptr

sp method #

func (c *context) sp() uintptr

sp method #

func (c *sigctxt) sp() uint64

sp method #

func (c *sigctxt) sp() uint32

sp method #

func (c *sigctxt) sp() uint64

sp method #

func (c *context) sp() uintptr

sp method #

func (c *sigctxt) sp() uintptr

sp method #

func (c *sigctxt) sp() uint64

sp method #

func (c *sigctxt) sp() uint64

sp method #

func (c *sigctxt) sp() uint32

sp method #

func (c *sigctxt) sp() uintptr

spanHasNoSpecials function #

spanHasNoSpecials marks a span as having no specials in the arena bitmap.

func spanHasNoSpecials(s *mspan)

spanHasSpecials function #

spanHasSpecials marks a span as having specials in the arena bitmap.

func spanHasSpecials(s *mspan)

spanOf function #

spanOf returns the span of p. If p does not point into the heap arena or no span has ever contained p, spanOf returns nil. If p does not point to allocated memory, this may return a non-nil span that does *not* contain p. If this is a possibility, the caller should either call spanOfHeap or check the span bounds explicitly. Must be nosplit because it has callers that are nosplit. go:nosplit

func spanOf(p uintptr) *mspan

spanOfHeap function #

spanOfHeap is like spanOf, but returns nil if p does not point to a heap object. Must be nosplit because it has callers that are nosplit. go:nosplit

func spanOfHeap(p uintptr) *mspan

spanOfUnchecked function #

spanOfUnchecked is equivalent to spanOf, but the caller must ensure that p points into an allocated heap arena. Must be nosplit because it has callers that are nosplit. go:nosplit

func spanOfUnchecked(p uintptr) *mspan

specialFindSplicePoint method #

Find a splice point in the sorted list and check for an already existing record. Returns a pointer to the next-reference in the list predecessor. Returns true, if the referenced item is an exact match.

func (span *mspan) specialFindSplicePoint(offset uintptr, kind byte) (**special, bool)

spillArgs function #

Spills/loads arguments in registers to/from an internal/abi.RegArgs respectively. Does not follow the Go ABI.

func spillArgs()

spillArgs function #

Used by reflectcall and the reflect package. Spills/loads arguments in registers to/from an internal/abi.RegArgs respectively. Does not follow the Go ABI.

func spillArgs()

spillArgs function #

Used by reflectcall and the reflect package. Spills/loads arguments in registers to/from an internal/abi.RegArgs respectively. Does not follow the Go ABI.

func spillArgs()

spillArgs function #

Used by reflectcall and the reflect package. Spills/loads arguments in registers to/from an internal/abi.RegArgs respectively. Does not follow the Go ABI.

func spillArgs()

spillArgs function #

Used by reflectcall and the reflect package. Spills/loads arguments in registers to/from an internal/abi.RegArgs respectively. Does not follow the Go ABI.

func spillArgs()

split method #

split returns the underlying span class as well as whether we're interested in the full or partial unswept lists for that class, indicated as a boolean (true means "full").

func (s sweepClass) split() (spc spanClass, full bool)

split method #

split splits the headTailIndex value into its parts.

func (h headTailIndex) split() (head uint32, tail uint32)

srcFunc method #

srcFunc returns the srcFunc representing the given frame. srcFunc should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/phuslu/log Do not remove or change the type signature. See go.dev/issue/67401. The go:linkname is below.

func (u *inlineUnwinder) srcFunc(uf inlineFrame) srcFunc

srcFunc method #

func (f funcInfo) srcFunc() srcFunc

stack method #

stack takes a stack trace skipping the provided number of frames. It then returns a traceArg representing that stack which may be passed to write.

func (tl traceLocker) stack(skip int) traceArg

stackalloc function #

stackalloc allocates an n byte stack. stackalloc must run on the system stack because it uses per-P resources and must not split the stack. go:systemstack

func stackalloc(n uint32) stack

stackcache_clear function #

go:systemstack

func stackcache_clear(c *mcache)

stackcacherefill function #

stackcacherefill/stackcacherelease implement a global pool of stack segments. The pool is required to prevent unlimited growth of per-thread caches. go:systemstack

func stackcacherefill(c *mcache, order uint8)

stackcacherelease function #

go:systemstack

func stackcacherelease(c *mcache, order uint8)

stackcheck function #

func stackcheck()

stackcheck function #

func stackcheck()

stackcheck function #

stackcheck checks that SP is in range [g->stack.lo, g->stack.hi).

func stackcheck()

stackcheck function #

stackcheck checks that SP is in range [g->stack.lo, g->stack.hi).

func stackcheck()

stackfree function #

stackfree frees an n byte stack allocation at stk. stackfree must run on the system stack because it uses per-P resources and must not split the stack. go:systemstack

func stackfree(stk stack)

stackinit function #

func stackinit()

stacklog2 function #

stacklog2 returns ⌊log_2(n)⌋.

func stacklog2(n uintptr) int

stackmapdata function #

go:nowritebarrier

func stackmapdata(stkmap *stackmap, n int32) bitvector

stackpoolalloc function #

Allocates a stack from the free pool. Must be called with stackpool[order].item.mu held.

func stackpoolalloc(order uint8) gclinkptr

stackpoolfree function #

Adds stack x to the free pool. Must be called with stackpool[order].item.mu held.

func stackpoolfree(x gclinkptr, order uint8)

start method #

func (ord *randomOrder) start(i uint32) randomEnum

start method #

start begins tracking a new limiter event of the current type. If an event is already in flight, then a new event cannot begin because the current time is already being attributed to that event. In this case, this function returns false. Otherwise, it returns true. The caller must be non-preemptible until at least stop is called or this function returns false. Because this is trying to measure "on-CPU" time of some event, getting scheduled away during it can mean that whatever we're measuring isn't a reflection of "on-CPU" time. The OS could deschedule us at any time, but we want to maintain as close of an approximation as we can.

func (e *limiterEvent) start(typ limiterEventType, now int64) bool

start method #

start extracts the start value from a packed sum.

func (p pallocSum) start() uint

start method #

start initializes a panic to start unwinding the stack. If p.goexit is true, then start may return multiple times.

func (p *_panic) start(pc uintptr, sp unsafe.Pointer)

start method #

start starts a new traceAdvancer.

func (s *traceAdvancerState) start()

startCheckmarks function #

startCheckmarks prepares for the checkmarks phase. The world must be stopped.

func startCheckmarks()

startCycle method #

startCycle resets the GC controller's state and computes estimates for a new GC cycle. The caller must hold worldsema and the world must be stopped.

func (c *gcControllerState) startCycle(markStartTime int64, procs int, trigger gcTrigger)

startGCTransition method #

startGCTransition notifies the limiter of a GC transition. This call takes ownership of the limiter and disables all other means of updating the limiter. Release ownership by calling finishGCTransition. It is safe to call concurrently with other operations.

func (l *gcCPULimiterState) startGCTransition(enableGC bool, now int64)

startLine method #

startLine returns the starting line number of the function. i.e., the line number of the func keyword.

func (f *Func) startLine() int32

startPC method #

startPC takes a start PC for a goroutine and produces a unique stack ID for it. It then returns a traceArg representing that stack which may be passed to write.

func (tl traceLocker) startPC(pc uintptr) traceArg

startPCForTrace function #

startPCForTrace returns the start PC of a goroutine for tracing purposes. If pc is a wrapper, it returns the PC of the wrapped function. Otherwise it returns pc.

func startPCForTrace(pc uintptr) uintptr

startTemplateThread function #

startTemplateThread starts the template thread if it is not already running. The calling thread must itself be in a known-good state.

func startTemplateThread()

startTheWorld function #

startTheWorld undoes the effects of stopTheWorld. w must be the worldStop returned by stopTheWorld.

func startTheWorld(w worldStop)

startTheWorldGC function #

startTheWorldGC undoes the effects of stopTheWorldGC. w must be the worldStop returned by stopTheWorld.

func startTheWorldGC(w worldStop)

startTheWorldWithSema function #

reason is the same STW reason passed to stopTheWorld. start is the start time returned by stopTheWorld. now is the current time; prefer to pass 0 to capture a fresh timestamp. startTheWorldWithSema returns now.

func startTheWorldWithSema(now int64, w worldStop) int64

startlockedm function #

Schedules the locked m to run the locked gp. May run during STW, so write barriers are not allowed. go:nowritebarrierrec

func startlockedm(gp *g)

startm function #

Schedules some M to run the p (creates an M if necessary). If p==nil, tries to get an idle P, if no idle P's does nothing. May run with m.p==nil, so write barriers are not allowed. If spinning is set, the caller has incremented nmspinning and must provide a P. startm will set m.spinning in the newly started M. Callers passing a non-nil P must call from a non-preemptible context. See comment on acquirem below. Argument lockheld indicates whether the caller already acquired the scheduler lock. Callers holding the lock when making the call must pass true. The lock might be temporarily dropped, but will be reacquired before returning. Must not have write barriers because this may be called without a P. go:nowritebarrierrec

func startm(pp *p, spinning bool, lockheld bool)

startpanic_m function #

startpanic_m prepares for an unrecoverable panic. It returns true if panic messages should be printed, or false if the runtime is in bad shape and should just print stacks. It must not have write barriers even though the write barrier explicitly ignores writes once dying > 0. Write barriers still assume that g.m.p != nil, and this function may not have P in some contexts (e.g. a panic in a signal handler for a signal sent to an M with no P). go:nowritebarrierrec

func startpanic_m() bool

statusWasTraced method #

statusWasTraced returns true if the sched resource's status was already acquired for tracing.

func (r *traceSchedResourceState) statusWasTraced(gen uintptr) bool

stdcall function #

Calling stdcall on os stack. May run during STW, so write barriers are not allowed. go:nowritebarrier go:nosplit

func stdcall(fn stdFunction) uintptr

stdcall0 function #

go:nosplit

func stdcall0(fn stdFunction) uintptr

stdcall1 function #

go:nosplit go:cgo_unsafe_args

func stdcall1(fn stdFunction, a0 uintptr) uintptr

stdcall2 function #

go:nosplit go:cgo_unsafe_args

func stdcall2(fn stdFunction, a0 uintptr, a1 uintptr) uintptr

stdcall3 function #

go:nosplit go:cgo_unsafe_args

func stdcall3(fn stdFunction, a0 uintptr, a1 uintptr, a2 uintptr) uintptr

stdcall4 function #

go:nosplit go:cgo_unsafe_args

func stdcall4(fn stdFunction, a0 uintptr, a1 uintptr, a2 uintptr, a3 uintptr) uintptr

stdcall5 function #

go:nosplit go:cgo_unsafe_args

func stdcall5(fn stdFunction, a0 uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr) uintptr

stdcall6 function #

go:nosplit go:cgo_unsafe_args

func stdcall6(fn stdFunction, a0 uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr) uintptr

stdcall7 function #

go:nosplit go:cgo_unsafe_args

func stdcall7(fn stdFunction, a0 uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) uintptr

stdcall8 function #

go:nosplit go:cgo_unsafe_args

func stdcall8(fn stdFunction, a0 uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr, a7 uintptr) uintptr

stdcall_no_g function #

stdcall_no_g calls asmstdcall on os stack without using g. go:nosplit

func stdcall_no_g(fn stdFunction, n int, args uintptr) uintptr

stealID method #

stealID steals an ID from the table, ensuring that it will not appear in the table anymore.

func (tab *traceMap) stealID() uint64

stealWork function #

stealWork attempts to steal a runnable goroutine or timer from any P. If newWork is true, new work may have been readied. If now is not 0 it is the current time. stealWork returns the passed time or the current time if now was passed as 0.

func stealWork(now int64) (gp *g, inheritTime bool, rnow int64, pollUntil int64, newWork bool)

step function #

step advances to the next pc, value pair in the encoded table.

func step(p []byte, pc *uintptr, val *int32, first bool) (newp []byte, ok bool)

stk method #

stk returns the slice in b holding the stack. The caller can assume that the backing array is immutable.

func (b *bucket) stk() []uintptr

stkbucket function #

Return the bucket for stk[0:nstk], allocating new bucket if needed.

func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket

stkobjinit function #

func stkobjinit()

stop method #

stop stops a traceAdvancer and blocks until it exits.

func (s *traceAdvancerState) stop()

stop method #

stop stops the active limiter event. Throws if the active event's type does not match typ. The caller must be non-preemptible across the event. See start as to why.

func (e *limiterEvent) stop(typ limiterEventType, now int64)

stop method #

stop stops the timer t. It may be on some other P, so we can't actually remove it from the timers heap. We can only mark it as stopped. It will be removed in due course by the P whose heap it is on. Reports whether the timer was stopped before it was run.

func (t *timer) stop() bool

stopTheWorld function #

stopTheWorld stops all P's from executing goroutines, interrupting all goroutines at GC safe points and records reason as the reason for the stop. On return, only the current goroutine's P is running. stopTheWorld must not be called from a system stack and the caller must not hold worldsema. The caller must call startTheWorld when other P's should resume execution. stopTheWorld is safe for multiple goroutines to call at the same time. Each will execute its own stop, and the stops will be serialized. This is also used by routines that do stack dumps. If the system is in panic or being exited, this may not reliably stop all goroutines. Returns the STW context. When starting the world, this context must be passed to startTheWorld.

func stopTheWorld(reason stwReason) worldStop

stopTheWorldGC function #

stopTheWorldGC has the same effect as stopTheWorld, but blocks until the GC is not running. It also blocks a GC from starting until startTheWorldGC is called.

func stopTheWorldGC(reason stwReason) worldStop

stopTheWorldWithSema function #

stopTheWorldWithSema is the core implementation of stopTheWorld. The caller is responsible for acquiring worldsema and disabling preemption first and then should call stopTheWorldWithSema on the system stack: semacquire(&worldsema, 0) m.preemptoff = "reason" var stw worldStop systemstack(func() { stw = stopTheWorldWithSema(reason) }) When finished, the caller must either call startTheWorld or undo these three operations separately: m.preemptoff = "" systemstack(func() { now = startTheWorldWithSema(stw) }) semrelease(&worldsema) It is allowed to acquire worldsema once and then execute multiple startTheWorldWithSema/stopTheWorldWithSema pairs. Other P's are able to execute between successive calls to startTheWorldWithSema and stopTheWorldWithSema. Holding worldsema causes any other goroutines invoking stopTheWorld to block. Returns the STW context. When starting the world, this context must be passed to startTheWorldWithSema. go:systemstack

func stopTheWorldWithSema(reason stwReason) worldStop

stopTimer function #

stopTimer stops a timer. It reports whether t was stopped before being run. go:linkname stopTimer time.stopTimer

func stopTimer(t *timeTimer) bool

stoplockedm function #

Stops execution of the current m that is locked to a g until the g is runnable again. Returns with acquired P.

func stoplockedm()

stopm function #

Stops execution of the current m until new work is available. Returns with acquired P.

func stopm()

store method #

func (prof *mLockProfile) store()

store method #

store packs and writes a new scavChunkData. store must be serialized with other calls to store.

func (sc *atomicScavChunkData) store(ssc scavChunkData)

store method #

func (x *profAtomic) store(new profIndex)

strequal function #

func strequal(p unsafe.Pointer, q unsafe.Pointer) bool

strhash function #

strhash should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/aristanetworks/goarista - github.com/bytedance/sonic - github.com/bytedance/go-tagexpr/v2 - github.com/cloudwego/dynamicgo - github.com/v2fly/v2ray-core/v5 Do not remove or change the type signature. See go.dev/issue/67401. go:linkname strhash

func strhash(p unsafe.Pointer, h uintptr) uintptr

strhashFallback function #

func strhashFallback(a unsafe.Pointer, h uintptr) uintptr

string method #

func (t rtype) string() string

string method #

string returns a traceArg representing s which may be passed to write. The string is assumed to be relatively short and popular, so it may be stored for a while in the string dictionary.

func (tl traceLocker) string(s string) traceArg

stringData method #

stringData appends s's data directly to buf. nosplit because it's part of writing an event for an M, which must not have any stack growth. go:nosplit

func (buf *traceBuf) stringData(s string)

stringDataOnStack function #

stringDataOnStack reports whether the string's data is stored on the current goroutine's stack.

func stringDataOnStack(s string) bool

stringHash function #

Testing adapters for hash quality tests (see hash_test.go) stringHash should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/k14s/starlark-go Do not remove or change the type signature. See go.dev/issue/67401. go:linkname stringHash

func stringHash(s string, seed uintptr) uintptr

stringStructOf function #

func stringStructOf(sp *string) *stringStruct

stringtoslicebyte function #

func stringtoslicebyte(buf *tmpBuf, s string) []byte

stringtoslicerune function #

func stringtoslicerune(buf *[tmpStringBufSize]rune, s string) []rune

strmax function #

func strmax(x string, y string) string

strmin function #

func strmin(x string, y string) string

sub method #

sub subtracts a uintptr offset from the offAddr.

func (l offAddr) sub(bytes uintptr) offAddr

subscriptionClock method #

func (u *subscriptionUnion) subscriptionClock() *subscriptionClock

subscriptionFdReadwrite method #

func (u *subscriptionUnion) subscriptionFdReadwrite() *subscriptionFdReadwrite

subtract method #

subtract takes the addrRange a and cuts out any overlap with b, then returns the new range. subtract assumes that a and b either don't overlap at all, only overlap on one side, or are equal. If b is strictly contained in a, thus forcing a split, it will throw.

func (a addrRange) subtract(b addrRange) addrRange

subtract1 function #

subtract1 returns the byte pointer p-1. nosplit because it is used during write barriers and must not be preempted. go:nowritebarrier go:nosplit

func subtract1(p *byte) *byte

subtractb function #

subtractb returns the byte pointer p-n. go:nowritebarrier go:nosplit

func subtractb(p *byte, n uintptr) *byte

summarize method #

summarize returns a packed summary of the bitmap in pallocBits.

func (b *pallocBits) summarize() pallocSum

suspendG function #

suspendG suspends goroutine gp at a safe-point and returns the state of the suspended goroutine. The caller gets read access to the goroutine until it calls resumeG. It is safe for multiple callers to attempt to suspend the same goroutine at the same time. The goroutine may execute between subsequent successful suspend operations. The current implementation grants exclusive access to the goroutine, and hence multiple callers will serialize. However, the intent is to grant shared read access, so please don't depend on exclusive access. This must be called from the system stack and the user goroutine on the current M (if any) must be in a preemptible state. This prevents deadlocks where two goroutines attempt to suspend each other and both are in non-preemptible states. There are other ways to resolve this deadlock, but this seems simplest. TODO(austin): What if we instead required this to be called from a user goroutine? Then we could deschedule the goroutine while waiting instead of blocking the thread. If two goroutines tried to suspend each other, one of them would win and the other wouldn't complete the suspend until it was resumed. We would have to be careful that they couldn't actually queue up suspend for each other and then both be suspended. This would also avoid the need for a kernel context switch in the synchronous case because we could just directly schedule the waiter. The context switch is unavoidable in the signal case. go:systemstack

func suspendG(gp *g) suspendGState

swapsub function #

func swapsub(pd *pollDesc, from int, to int)

sweep method #

sweep frees or collects finalizers for blocks not marked in the mark phase. It clears the mark bits in preparation for the next GC round. Returns true if the span was returned to heap. If preserve=true, don't return it to heap nor relink in mcentral lists; caller takes care of it.

func (sl *sweepLocked) sweep(preserve bool) bool

sweepers method #

sweepers returns the current number of active sweepers.

func (a *activeSweep) sweepers() uint32

sweepone function #

sweepone sweeps some unswept heap span and returns the number of pages returned to the heap, or ^uintptr(0) if there was nothing to sweep.

func sweepone() uintptr

switchToCrashStack function #

Switch to crashstack and call fn, with special handling of concurrent and recursive cases. Nosplit as it is called in a bad stack condition (we know morestack would fail). go:nosplit go:nowritebarrierrec

func switchToCrashStack(fn func())

switchToCrashStack0 function #

go:noescape

func switchToCrashStack0(fn func())

symPC method #

symPC returns the PC that should be used for symbolizing the current frame. Specifically, this is the PC of the last instruction executed in this frame. If this frame did a normal call, then frame.pc is a return PC, so this will return frame.pc-1, which points into the CALL instruction. If the frame was interrupted by a signal (e.g., profiler, segv, etc) then frame.pc is for the trapped instruction, so this returns frame.pc. See issue #34123. Finally, frame.pc can be at function entry when the frame is initialized without actually running code, like in runtime.mstart, in which case this returns frame.pc because that's the best we can do.

func (u *unwinder) symPC() uintptr

sync_atomic_CompareAndSwapPointer function #

go:linkname sync_atomic_CompareAndSwapPointer sync/atomic.CompareAndSwapPointer go:nosplit

func sync_atomic_CompareAndSwapPointer(ptr *unsafe.Pointer, old unsafe.Pointer, new unsafe.Pointer) bool

sync_atomic_CompareAndSwapUintptr function #

go:linkname sync_atomic_CompareAndSwapUintptr sync/atomic.CompareAndSwapUintptr

func sync_atomic_CompareAndSwapUintptr(ptr *uintptr, old uintptr, new uintptr) bool

sync_atomic_StorePointer function #

go:linkname sync_atomic_StorePointer sync/atomic.StorePointer go:nosplit

func sync_atomic_StorePointer(ptr *unsafe.Pointer, new unsafe.Pointer)

sync_atomic_StoreUintptr function #

go:linkname sync_atomic_StoreUintptr sync/atomic.StoreUintptr

func sync_atomic_StoreUintptr(ptr *uintptr, new uintptr)

sync_atomic_SwapPointer function #

go:linkname sync_atomic_SwapPointer sync/atomic.SwapPointer go:nosplit

func sync_atomic_SwapPointer(ptr *unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer

sync_atomic_SwapUintptr function #

go:linkname sync_atomic_SwapUintptr sync/atomic.SwapUintptr

func sync_atomic_SwapUintptr(ptr *uintptr, new uintptr) uintptr

sync_atomic_runtime_procPin function #

go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin go:nosplit

func sync_atomic_runtime_procPin() int

sync_atomic_runtime_procUnpin function #

go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin go:nosplit

func sync_atomic_runtime_procUnpin()

sync_fatal function #

go:linkname sync_fatal sync.fatal

func sync_fatal(s string)

sync_runtime_Semacquire function #

sync_runtime_Semacquire should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - gvisor.dev/gvisor - github.com/sagernet/gvisor Do not remove or change the type signature. See go.dev/issue/67401. go:linkname sync_runtime_Semacquire sync.runtime_Semacquire

func sync_runtime_Semacquire(addr *uint32)

sync_runtime_SemacquireRWMutex function #

go:linkname sync_runtime_SemacquireRWMutex sync.runtime_SemacquireRWMutex

func sync_runtime_SemacquireRWMutex(addr *uint32, lifo bool, skipframes int)

sync_runtime_SemacquireRWMutexR function #

go:linkname sync_runtime_SemacquireRWMutexR sync.runtime_SemacquireRWMutexR

func sync_runtime_SemacquireRWMutexR(addr *uint32, lifo bool, skipframes int)

sync_runtime_SemacquireWaitGroup function #

go:linkname sync_runtime_SemacquireWaitGroup sync.runtime_SemacquireWaitGroup

func sync_runtime_SemacquireWaitGroup(addr *uint32)

sync_runtime_Semrelease function #

sync_runtime_Semrelease should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - gvisor.dev/gvisor - github.com/sagernet/gvisor Do not remove or change the type signature. See go.dev/issue/67401. go:linkname sync_runtime_Semrelease sync.runtime_Semrelease

func sync_runtime_Semrelease(addr *uint32, handoff bool, skipframes int)

sync_runtime_canSpin function #

Active spinning for sync.Mutex. sync_runtime_canSpin should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/livekit/protocol - github.com/sagernet/gvisor - gvisor.dev/gvisor Do not remove or change the type signature. See go.dev/issue/67401. go:linkname sync_runtime_canSpin sync.runtime_canSpin go:nosplit

func sync_runtime_canSpin(i int) bool

sync_runtime_doSpin function #

sync_runtime_doSpin should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/livekit/protocol - github.com/sagernet/gvisor - gvisor.dev/gvisor Do not remove or change the type signature. See go.dev/issue/67401. go:linkname sync_runtime_doSpin sync.runtime_doSpin go:nosplit

func sync_runtime_doSpin()

sync_runtime_procPin function #

go:linkname sync_runtime_procPin sync.runtime_procPin go:nosplit

func sync_runtime_procPin() int

sync_runtime_procUnpin function #

go:linkname sync_runtime_procUnpin sync.runtime_procUnpin go:nosplit

func sync_runtime_procUnpin()

sync_runtime_registerPoolCleanup function #

sync_runtime_registerPoolCleanup should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/gopkg - github.com/songzhibin97/gkit Do not remove or change the type signature. See go.dev/issue/67401. go:linkname sync_runtime_registerPoolCleanup sync.runtime_registerPoolCleanup

func sync_runtime_registerPoolCleanup(f func())

sync_throw function #

go:linkname sync_throw sync.throw

func sync_throw(s string)

syncadjustsudogs function #

syncadjustsudogs adjusts gp's sudogs and copies the part of gp's stack they refer to while synchronizing with concurrent channel operations. It returns the number of bytes of stack copied.

func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr

synctestRun function #

go:linkname synctestRun internal/synctest.Run

func synctestRun(f func())

synctestWait function #

go:linkname synctestWait internal/synctest.Wait

func synctestWait()

synctest_acquire function #

go:linkname synctest_acquire internal/synctest.acquire

func synctest_acquire() any

synctest_inBubble function #

go:linkname synctest_inBubble internal/synctest.inBubble

func synctest_inBubble(sg any, f func())

synctest_release function #

go:linkname synctest_release internal/synctest.release

func synctest_release(sg any)

synctestidle_c function #

func synctestidle_c(gp *g, _ unsafe.Pointer) bool

synctestwait_c function #

func synctestwait_c(gp *g, _ unsafe.Pointer) bool

sysAlloc function #

sysAlloc transitions an OS-chosen region of memory from None to Ready. More specifically, it obtains a large chunk of zeroed memory from the operating system, typically on the order of a hundred kilobytes or a megabyte. This memory is always immediately available for use. sysStat must be non-nil. Don't split the stack as this function may be invoked without a valid G, which prevents us from allocating more stack. go:nosplit

func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer

sysAlloc method #

sysAlloc allocates heap arena space for at least n bytes. The returned pointer is always heapArenaBytes-aligned and backed by h.arenas metadata. The returned size is always a multiple of heapArenaBytes. sysAlloc returns nil on failure. There is no corresponding free function. hintList is a list of hint addresses for where to allocate new heap arenas. It must be non-nil. register indicates whether the heap arena should be registered in allArenas. sysAlloc returns a memory region in the Reserved state. This region must be transitioned to Prepared and then Ready before use. h must be locked.

func (h *mheap) sysAlloc(n uintptr, hintList **arenaHint, register bool) (v unsafe.Pointer, size uintptr)

sysAllocOS function #

func sysAllocOS(n uintptr) unsafe.Pointer

sysAllocOS function #

Don't split the stack as this function may be invoked without a valid G, which prevents us from allocating more stack. go:nosplit

func sysAllocOS(n uintptr) unsafe.Pointer

sysAllocOS function #

Don't split the stack as this method may be invoked without a valid G, which prevents us from allocating more stack. go:nosplit

func sysAllocOS(n uintptr) unsafe.Pointer

sysAllocOS function #

Don't split the stack as this function may be invoked without a valid G, which prevents us from allocating more stack. go:nosplit

func sysAllocOS(n uintptr) unsafe.Pointer

sysAllocOS function #

Don't split the stack as this function may be invoked without a valid G, which prevents us from allocating more stack. go:nosplit

func sysAllocOS(n uintptr) unsafe.Pointer

sysAllocOS function #

Don't split the stack as this method may be invoked without a valid G, which prevents us from allocating more stack. go:nosplit

func sysAllocOS(n uintptr) unsafe.Pointer

sysFault function #

sysFault transitions a memory region from Ready to Reserved. It marks a region such that it will always fault if accessed. Used only for debugging the runtime. TODO(mknyszek): Currently it's true that all uses of sysFault transition memory from Ready to Reserved, but this may not be true in the future since on every platform the operation is much more general than that. If a transition from Prepared is ever introduced, create a new function that elides the Ready state accounting.

func sysFault(v unsafe.Pointer, n uintptr)

sysFaultOS function #

func sysFaultOS(v unsafe.Pointer, n uintptr)

sysFaultOS function #

func sysFaultOS(v unsafe.Pointer, n uintptr)

sysFaultOS function #

func sysFaultOS(v unsafe.Pointer, n uintptr)

sysFaultOS function #

func sysFaultOS(v unsafe.Pointer, n uintptr)

sysFaultOS function #

func sysFaultOS(v unsafe.Pointer, n uintptr)

sysFaultOS function #

func sysFaultOS(v unsafe.Pointer, n uintptr)

sysFree function #

sysFree transitions a memory region from any state to None. Therefore, it returns memory unconditionally. It is used if an out-of-memory error has been detected midway through an allocation or to carve out an aligned section of the address space. It is okay if sysFree is a no-op only if sysReserve always returns a memory region aligned to the heap allocator's alignment restrictions. sysStat must be non-nil. Don't split the stack as this function may be invoked without a valid G, which prevents us from allocating more stack. go:nosplit

func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat)

sysFreeOS function #

Don't split the stack as this function may be invoked without a valid G, which prevents us from allocating more stack. go:nosplit

func sysFreeOS(v unsafe.Pointer, n uintptr)

sysFreeOS function #

Don't split the stack as this function may be invoked without a valid G, which prevents us from allocating more stack. go:nosplit

func sysFreeOS(v unsafe.Pointer, n uintptr)

sysFreeOS function #

Don't split the stack as this function may be invoked without a valid G, which prevents us from allocating more stack. go:nosplit

func sysFreeOS(v unsafe.Pointer, n uintptr)

sysFreeOS function #

Don't split the stack as this function may be invoked without a valid G, which prevents us from allocating more stack. go:nosplit

func sysFreeOS(v unsafe.Pointer, n uintptr)

sysFreeOS function #

func sysFreeOS(v unsafe.Pointer, n uintptr)

sysFreeOS function #

Don't split the stack as this function may be invoked without a valid G, which prevents us from allocating more stack. go:nosplit

func sysFreeOS(v unsafe.Pointer, n uintptr)

sysGrow method #

See mpagealloc_64bit.go for details.

func (p *pageAlloc) sysGrow(base uintptr, limit uintptr)

sysGrow method #

sysGrow is a no-op on 32-bit platforms.

func (s *scavengeIndex) sysGrow(base uintptr, limit uintptr, sysStat *sysMemStat) uintptr

sysGrow method #

sysGrow performs architecture-dependent operations on heap growth for the page allocator, such as mapping in new memory for summaries. It also updates the length of the slices in p.summary. base is the base of the newly-added heap memory and limit is the first address past the end of the newly-added heap memory. Both must be aligned to pallocChunkBytes. The caller must update p.start and p.end after calling sysGrow.

func (p *pageAlloc) sysGrow(base uintptr, limit uintptr)

sysGrow method #

sysGrow increases the index's backing store in response to a heap growth. Returns the amount of memory added to sysStat.

func (s *scavengeIndex) sysGrow(base uintptr, limit uintptr, sysStat *sysMemStat) uintptr

sysHugePage function #

sysHugePage does not transition memory regions, but instead provides a hint to the OS that it would be more efficient to back this memory region with pages of a larger size transparently.

func sysHugePage(v unsafe.Pointer, n uintptr)

sysHugePageCollapse function #

sysHugePageCollapse attempts to immediately back the provided memory region with huge pages. It is best-effort and may fail silently.

func sysHugePageCollapse(v unsafe.Pointer, n uintptr)

sysHugePageCollapseOS function #

func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr)

sysHugePageCollapseOS function #

func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr)

sysHugePageCollapseOS function #

func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr)

sysHugePageCollapseOS function #

func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr)

sysHugePageCollapseOS function #

func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr)

sysHugePageCollapseOS function #

func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr)

sysHugePageOS function #

func sysHugePageOS(v unsafe.Pointer, n uintptr)

sysHugePageOS function #

func sysHugePageOS(v unsafe.Pointer, n uintptr)

sysHugePageOS function #

func sysHugePageOS(v unsafe.Pointer, n uintptr)

sysHugePageOS function #

func sysHugePageOS(v unsafe.Pointer, n uintptr)

sysHugePageOS function #

func sysHugePageOS(v unsafe.Pointer, n uintptr)

sysHugePageOS function #

func sysHugePageOS(v unsafe.Pointer, n uintptr)

sysInit method #

sysInit performs architecture-dependent initialization of fields in pageAlloc. pageAlloc should be uninitialized except for sysStat if any runtime statistic should be updated.

func (p *pageAlloc) sysInit(test bool)

sysInit method #

sysInit initializes the scavengeIndex's chunks array. Returns the amount of memory added to sysStat.

func (s *scavengeIndex) sysInit(test bool, sysStat *sysMemStat) uintptr

sysInit method #

sysInit initializes the scavengeIndex's chunks array. Returns the amount of memory added to sysStat.

func (s *scavengeIndex) sysInit(test bool, sysStat *sysMemStat) (mappedReady uintptr)

sysInit method #

See mpagealloc_64bit.go for details.

func (p *pageAlloc) sysInit(test bool)

sysMap function #

sysMap transitions a memory region from Reserved to Prepared. It ensures the memory region can be efficiently transitioned to Ready. sysStat must be non-nil.

func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat)

sysMapOS function #

func sysMapOS(v unsafe.Pointer, n uintptr)

sysMapOS function #

func sysMapOS(v unsafe.Pointer, n uintptr)

sysMapOS function #

func sysMapOS(v unsafe.Pointer, n uintptr)

sysMapOS function #

func sysMapOS(v unsafe.Pointer, n uintptr)

sysMapOS function #

func sysMapOS(v unsafe.Pointer, n uintptr)

sysMapOS function #

func sysMapOS(v unsafe.Pointer, n uintptr)

sysMmap function #

sysMmap calls the mmap system call. It is implemented in assembly.

func sysMmap(addr unsafe.Pointer, n uintptr, prot int32, flags int32, fd int32, off uint32) (p unsafe.Pointer, err int)

sysMunmap function #

sysMunmap calls the munmap system call. It is implemented in assembly.

func sysMunmap(addr unsafe.Pointer, n uintptr)

sysNoHugePage function #

sysNoHugePage does not transition memory regions, but instead provides a hint to the OS that it would be less efficient to back this memory region with pages of a larger size transparently.

func sysNoHugePage(v unsafe.Pointer, n uintptr)

sysNoHugePageOS function #

func sysNoHugePageOS(v unsafe.Pointer, n uintptr)

sysNoHugePageOS function #

func sysNoHugePageOS(v unsafe.Pointer, n uintptr)

sysNoHugePageOS function #

func sysNoHugePageOS(v unsafe.Pointer, n uintptr)

sysNoHugePageOS function #

func sysNoHugePageOS(v unsafe.Pointer, n uintptr)

sysNoHugePageOS function #

func sysNoHugePageOS(v unsafe.Pointer, n uintptr)

sysNoHugePageOS function #

func sysNoHugePageOS(v unsafe.Pointer, n uintptr)

sysReserve function #

sysReserve transitions a memory region from None to Reserved. It reserves address space in such a way that it would cause a fatal fault upon access (either via permissions or not committing the memory). Such a reservation is thus never backed by physical memory. If the pointer passed to it is non-nil, the caller wants the reservation there, but sysReserve can still choose another location if that one is unavailable. NOTE: sysReserve returns OS-aligned memory, but the heap allocator may use larger alignment, so the caller must be careful to realign the memory obtained by sysReserve.

func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer

sysReserveAligned function #

sysReserveAligned is like sysReserve, but the returned pointer is aligned to align bytes. It may reserve either n or n+align bytes, so it returns the size that was reserved.

func sysReserveAligned(v unsafe.Pointer, size uintptr, align uintptr) (unsafe.Pointer, uintptr)

sysReserveAlignedSbrk function #

func sysReserveAlignedSbrk(size uintptr, align uintptr) (unsafe.Pointer, uintptr)

sysReserveAlignedSbrk function #

func sysReserveAlignedSbrk(size uintptr, align uintptr) (unsafe.Pointer, uintptr)

sysReserveOS function #

func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer

sysReserveOS function #

func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer

sysReserveOS function #

func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer

sysReserveOS function #

func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer

sysReserveOS function #

func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer

sysReserveOS function #

func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer

sysSigaction function #

sysSigaction calls the sigaction system call. go:nosplit

func sysSigaction(sig uint32, new *sigactiont, old *sigactiont)

sysSigaction function #

sysSigaction calls the rt_sigaction system call. go:nosplit

func sysSigaction(sig uint32, new *sigactiont, old *sigactiont)

sysUnused function #

sysUnused transitions a memory region from Ready to Prepared. It notifies the operating system that the physical pages backing this memory region are no longer needed and can be reused for other purposes. The contents of a sysUnused memory region are considered forfeit and the region must not be accessed again until sysUsed is called.

func sysUnused(v unsafe.Pointer, n uintptr)

sysUnusedOS function #

func sysUnusedOS(v unsafe.Pointer, n uintptr)

sysUnusedOS function #

func sysUnusedOS(v unsafe.Pointer, n uintptr)

sysUnusedOS function #

func sysUnusedOS(v unsafe.Pointer, n uintptr)

sysUnusedOS function #

func sysUnusedOS(v unsafe.Pointer, n uintptr)

sysUnusedOS function #

func sysUnusedOS(v unsafe.Pointer, n uintptr)

sysUnusedOS function #

func sysUnusedOS(v unsafe.Pointer, n uintptr)

sysUsed function #

sysUsed transitions a memory region from Prepared to Ready. It notifies the operating system that the memory region is needed and ensures that the region may be safely accessed. This is typically a no-op on systems that don't have an explicit commit step and hard over-commit limits, but is critical on Windows, for example. This operation is idempotent for memory already in the Prepared state, so it is safe to refer, with v and n, to a range of memory that includes both Prepared and Ready memory. However, the caller must provide the exact amount of Prepared memory for accounting purposes.

func sysUsed(v unsafe.Pointer, n uintptr, prepared uintptr)

sysUsedOS function #

func sysUsedOS(v unsafe.Pointer, n uintptr)

sysUsedOS function #

func sysUsedOS(v unsafe.Pointer, n uintptr)

sysUsedOS function #

func sysUsedOS(v unsafe.Pointer, n uintptr)

sysUsedOS function #

func sysUsedOS(v unsafe.Pointer, n uintptr)

sysUsedOS function #

func sysUsedOS(v unsafe.Pointer, n uintptr)

sysUsedOS function #

func sysUsedOS(v unsafe.Pointer, n uintptr)

sys_umtx_op function #

go:noescape

func sys_umtx_op(addr *uint32, mode int32, val uint32, uaddr1 uintptr, ut *umtx_time) int32

sys_umtx_sleep function #

go:noescape

func sys_umtx_sleep(addr *uint32, val int32, timeout int32) int32

sys_umtx_wakeup function #

go:noescape

func sys_umtx_wakeup(addr *uint32, val int32) int32

sysargs function #

func sysargs(argc int32, argv **byte)

sysargs function #

func sysargs(argc int32, argv **byte)

sysargs function #

func sysargs(argc int32, argv **byte)

sysargs function #

func sysargs(argc int32, argv **byte)

sysargs function #

func sysargs(argc int32, argv **byte)

sysargs function #

func sysargs(argc int32, argv **byte)

sysargs function #

func sysargs(argc int32, argv **byte)

sysauxv function #

func sysauxv(auxv []uintptr) (pairs int)

sysauxv function #

func sysauxv(auxv []uintptr) (pairs int)

sysauxv function #

func sysauxv(auxv []uintptr) (pairs int)

sysauxv function #

func sysauxv(auxv []uintptr) (pairs int)

sysauxv function #

func sysauxv(auxv []uintptr) (pairs int)

syscall function #

func syscall()

syscall function #

func syscall()

syscall0 function #

go:nowritebarrier go:nosplit

func syscall0(fn *libFunc) (r uintptr, err uintptr)

syscall1 function #

go:nowritebarrier go:nosplit

func syscall1(fn *libFunc, a0 uintptr) (r uintptr, err uintptr)

syscall10 function #

func syscall10()

syscall10X function #

func syscall10X()

syscall2 function #

go:nowritebarrier go:nosplit go:cgo_unsafe_args

func syscall2(fn *libFunc, a0 uintptr, a1 uintptr) (r uintptr, err uintptr)

syscall3 function #

go:nowritebarrier go:nosplit go:cgo_unsafe_args

func syscall3(fn *libFunc, a0 uintptr, a1 uintptr, a2 uintptr) (r uintptr, err uintptr)

syscall4 function #

go:nowritebarrier go:nosplit go:cgo_unsafe_args

func syscall4(fn *libFunc, a0 uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r uintptr, err uintptr)

syscall5 function #

go:nowritebarrier go:nosplit go:cgo_unsafe_args

func syscall5(fn *libFunc, a0 uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr) (r uintptr, err uintptr)

syscall6 function #

func syscall6()

syscall6 function #

go:nowritebarrier go:nosplit go:cgo_unsafe_args

func syscall6(fn *libFunc, a0 uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr) (r uintptr, err uintptr)

syscall6 function #

func syscall6()

syscall6X function #

func syscall6X()

syscall6X function #

func syscall6X()

syscall9 function #

func syscall9()

syscallPtr function #

func syscallPtr()

syscallX function #

func syscallX()

syscallX function #

func syscallX()

syscall_Exit function #

go:linkname syscall_Exit syscall.Exit go:nosplit

func syscall_Exit(code int)

syscall_Getpagesize function #

go:linkname syscall_Getpagesize syscall.Getpagesize

func syscall_Getpagesize() int

syscall_RawSyscall function #

This is syscall.RawSyscall, it exists to satisfy some build dependency, but it doesn't work. This is exported via linkname to assembly in the syscall package. go:linkname syscall_RawSyscall

func syscall_RawSyscall(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_Syscall function #

Syscall is needed because some packages (like net) need it too. The best way is to return EINVAL and let Go handle the failure. If the syscall can't fail, this function can redirect it to a real syscall. This is exported via linkname to assembly in the syscall package. go:nosplit go:linkname syscall_Syscall

func syscall_Syscall(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_Syscall function #

go:linkname syscall_Syscall syscall.Syscall go:nosplit

func syscall_Syscall(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_Syscall12 function #

go:linkname syscall_Syscall12 syscall.Syscall12 go:nosplit

func syscall_Syscall12(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr, a7 uintptr, a8 uintptr, a9 uintptr, a10 uintptr, a11 uintptr, a12 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_Syscall15 function #

go:linkname syscall_Syscall15 syscall.Syscall15 go:nosplit

func syscall_Syscall15(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr, a7 uintptr, a8 uintptr, a9 uintptr, a10 uintptr, a11 uintptr, a12 uintptr, a13 uintptr, a14 uintptr, a15 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_Syscall18 function #

go:linkname syscall_Syscall18 syscall.Syscall18 go:nosplit

func syscall_Syscall18(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr, a7 uintptr, a8 uintptr, a9 uintptr, a10 uintptr, a11 uintptr, a12 uintptr, a13 uintptr, a14 uintptr, a15 uintptr, a16 uintptr, a17 uintptr, a18 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_Syscall6 function #

go:linkname syscall_Syscall6 syscall.Syscall6 go:nosplit

func syscall_Syscall6(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_Syscall9 function #

go:linkname syscall_Syscall9 syscall.Syscall9 go:nosplit

func syscall_Syscall9(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr, a7 uintptr, a8 uintptr, a9 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_SyscallN function #

go:linkname syscall_SyscallN syscall.SyscallN go:nosplit

func syscall_SyscallN(fn uintptr, args ...uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_cgocaller function #

wrapper for syscall package to call cgocall for libc (cgo) calls. go:linkname syscall_cgocaller syscall.cgocaller go:nosplit go:uintptrescapes

func syscall_cgocaller(fn unsafe.Pointer, args ...uintptr) uintptr

syscall_chdir function #

go:nosplit go:linkname syscall_chdir

func syscall_chdir(path uintptr) (err uintptr)

syscall_chdir function #

go:linkname syscall_chdir syscall.chdir go:nosplit

func syscall_chdir(path uintptr) (err uintptr)

syscall_chroot function #

go:nosplit go:linkname syscall_chroot

func syscall_chroot(path uintptr) (err uintptr)

syscall_chroot1 function #

go:linkname syscall_chroot1 syscall.chroot1 go:nosplit

func syscall_chroot1(path uintptr) (err uintptr)

syscall_close function #

like close, but must not split stack, for forkx. go:nosplit go:linkname syscall_close

func syscall_close(fd int32) int32

syscall_closeFD function #

like close, but must not split stack, for fork. go:linkname syscall_closeFD syscall.closeFD go:nosplit

func syscall_closeFD(fd int32) int32

syscall_dup2 function #

go:nosplit go:linkname syscall_dup2

func syscall_dup2(oldfd uintptr, newfd uintptr) (val uintptr, err uintptr)

syscall_dup2child function #

go:linkname syscall_dup2child syscall.dup2child go:nosplit

func syscall_dup2child(old uintptr, new uintptr) (val uintptr, err uintptr)

syscall_execve function #

go:linkname syscall_execve syscall.execve go:nosplit

func syscall_execve(path uintptr, argv uintptr, envp uintptr) (err uintptr)

syscall_execve function #

go:nosplit go:linkname syscall_execve go:cgo_unsafe_args

func syscall_execve(path uintptr, argv uintptr, envp uintptr) (err uintptr)

syscall_exit function #

like exit, but must not split stack, for fork. go:linkname syscall_exit syscall.exit go:nosplit

func syscall_exit(code uintptr)

syscall_exit function #

like exit, but must not split stack, for forkx. go:nosplit go:linkname syscall_exit

func syscall_exit(code uintptr)

syscall_fcntl function #

go:nosplit go:linkname syscall_fcntl go:cgo_unsafe_args

func syscall_fcntl(fd uintptr, cmd uintptr, arg uintptr) (val uintptr, err uintptr)

syscall_fcntl1 function #

go:linkname syscall_fcntl1 syscall.fcntl1 go:nosplit

func syscall_fcntl1(fd uintptr, cmd uintptr, arg uintptr) (val uintptr, err uintptr)

syscall_forkx function #

go:linkname syscall_forkx syscall.forkx go:nosplit

func syscall_forkx(flags uintptr) (pid uintptr, err uintptr)

syscall_forkx function #

go:nosplit go:linkname syscall_forkx

func syscall_forkx(flags uintptr) (pid uintptr, err uintptr)

syscall_gethostname function #

go:linkname syscall_gethostname

func syscall_gethostname() (name string, err uintptr)

syscall_getpid function #

go:linkname syscall_getpid syscall.getpid go:nosplit

func syscall_getpid() (pid uintptr, err uintptr)

syscall_getpid function #

go:nosplit go:linkname syscall_getpid

func syscall_getpid() (pid uintptr, err uintptr)

syscall_getprocaddress function #

golang.org/x/sys linknames syscall.getprocaddress (in addition to standard package syscall). Do not remove or change the type signature. go:linkname syscall_getprocaddress syscall.getprocaddress

func syscall_getprocaddress(handle uintptr, procname *byte) (outhandle uintptr, err uintptr)

syscall_ioctl function #

go:nosplit go:linkname syscall_ioctl go:cgo_unsafe_args

func syscall_ioctl(fd uintptr, req uintptr, arg uintptr) (err uintptr)

syscall_ioctl function #

go:linkname syscall_ioctl syscall.ioctl go:nosplit

func syscall_ioctl(fd uintptr, req uintptr, arg uintptr) (err uintptr)

syscall_loadlibrary function #

golang.org/x/sys linknames syscall.loadlibrary (in addition to standard package syscall). Do not remove or change the type signature. go:linkname syscall_loadlibrary syscall.loadlibrary

func syscall_loadlibrary(filename *uint16) (handle uintptr, err uintptr)

syscall_loadsystemlibrary function #

go:linkname syscall_loadsystemlibrary syscall.loadsystemlibrary

func syscall_loadsystemlibrary(filename *uint16) (handle uintptr, err uintptr)

syscall_now function #

go:linkname syscall_now syscall.now

func syscall_now() (sec int64, nsec int32)

syscall_rawSyscall function #

golang.org/x/sys linknames syscall_rawSyscall (in addition to standard package syscall). Do not remove or change the type signature. go:linkname syscall_rawSyscall syscall.rawSyscall go:nosplit go:cgo_unsafe_args

func syscall_rawSyscall(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_rawSyscall function #

golang.org/x/sys linknames syscall_rawSyscall (in addition to standard package syscall). Do not remove or change the type signature. go:linkname syscall_rawSyscall syscall.rawSyscall go:nosplit

func syscall_rawSyscall(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_rawSyscall10X function #

go:linkname syscall_rawSyscall10X syscall.rawSyscall10X go:nosplit go:cgo_unsafe_args

func syscall_rawSyscall10X(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr, a7 uintptr, a8 uintptr, a9 uintptr, a10 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_rawSyscall6 function #

This is exported via linkname to assembly in the syscall package. go:nosplit go:cgo_unsafe_args go:linkname syscall_rawSyscall6

func syscall_rawSyscall6(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_rawSyscall6 function #

golang.org/x/sys linknames syscall_rawSyscall6 (in addition to standard package syscall). Do not remove or change the type signature. go:linkname syscall_rawSyscall6 syscall.rawSyscall6 go:nosplit

func syscall_rawSyscall6(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_rawSyscall6 function #

golang.org/x/sys linknames syscall_rawSyscall6 (in addition to standard package syscall). Do not remove or change the type signature. go:linkname syscall_rawSyscall6 syscall.rawSyscall6 go:nosplit go:cgo_unsafe_args

func syscall_rawSyscall6(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_rawSyscall6X function #

go:linkname syscall_rawSyscall6X syscall.rawSyscall6X go:nosplit go:cgo_unsafe_args

func syscall_rawSyscall6X(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_rawsyscall function #

This is syscall.RawSyscall, it exists to satisfy some build dependency, but it doesn't work. go:linkname syscall_rawsyscall

func syscall_rawsyscall(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_rawsyscall6 function #

This is syscall.RawSyscall6, it exists to avoid a linker error because syscall.RawSyscall6 is already declared. See golang.org/issue/24357 go:linkname syscall_rawsyscall6

func syscall_rawsyscall6(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_rawsysvicall6 function #

go:nosplit go:linkname syscall_rawsysvicall6 go:cgo_unsafe_args

func syscall_rawsysvicall6(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_runtimeSetenv function #

go:linkname syscall_runtimeSetenv syscall.runtimeSetenv

func syscall_runtimeSetenv(key string, value string)

syscall_runtimeUnsetenv function #

go:linkname syscall_runtimeUnsetenv syscall.runtimeUnsetenv

func syscall_runtimeUnsetenv(key string)

syscall_runtime_AfterExec function #

Called from syscall package after Exec. go:linkname syscall_runtime_AfterExec syscall.runtime_AfterExec

func syscall_runtime_AfterExec()

syscall_runtime_AfterFork function #

Called from syscall package after fork in parent. syscall_runtime_AfterFork is for package syscall, but widely used packages access it using linkname. Notable members of the hall of shame include: - gvisor.dev/gvisor Do not remove or change the type signature. See go.dev/issue/67401. go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork go:nosplit

func syscall_runtime_AfterFork()

syscall_runtime_AfterForkInChild function #

Called from syscall package after fork in child. It resets non-sigignored signals to the default handler, and restores the signal mask in preparation for the exec. Because this might be called during a vfork, and therefore may be temporarily sharing address space with the parent process, this must not change any global variables or calling into C code that may do so. syscall_runtime_AfterForkInChild is for package syscall, but widely used packages access it using linkname. Notable members of the hall of shame include: - gvisor.dev/gvisor Do not remove or change the type signature. See go.dev/issue/67401. go:linkname syscall_runtime_AfterForkInChild syscall.runtime_AfterForkInChild go:nosplit go:nowritebarrierrec

func syscall_runtime_AfterForkInChild()

syscall_runtime_BeforeExec function #

Called from syscall package before Exec. go:linkname syscall_runtime_BeforeExec syscall.runtime_BeforeExec

func syscall_runtime_BeforeExec()

syscall_runtime_BeforeFork function #

Called from syscall package before fork. syscall_runtime_BeforeFork is for package syscall, but widely used packages access it using linkname. Notable members of the hall of shame include: - gvisor.dev/gvisor Do not remove or change the type signature. See go.dev/issue/67401. go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork go:nosplit

func syscall_runtime_BeforeFork()

syscall_runtime_doAllThreadsSyscall function #

syscall_runtime_doAllThreadsSyscall serializes Go execution and executes a specified system call on all Ms. The system call is expected to succeed and return the same value on every thread. If any threads do not match, the runtime throws. go:linkname syscall_runtime_doAllThreadsSyscall syscall.runtime_doAllThreadsSyscall go:uintptrescapes

func syscall_runtime_doAllThreadsSyscall(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_runtime_envs function #

go:linkname syscall_runtime_envs syscall.runtime_envs

func syscall_runtime_envs() []string

syscall_setgid function #

go:linkname syscall_setgid syscall.setgid go:nosplit

func syscall_setgid(gid uintptr) (err uintptr)

syscall_setgid function #

go:nosplit go:linkname syscall_setgid

func syscall_setgid(gid uintptr) (err uintptr)

syscall_setgroups function #

go:nosplit go:linkname syscall_setgroups go:cgo_unsafe_args

func syscall_setgroups(ngid uintptr, gid uintptr) (err uintptr)

syscall_setgroups1 function #

go:linkname syscall_setgroups1 syscall.setgroups1 go:nosplit

func syscall_setgroups1(ngid uintptr, gid uintptr) (err uintptr)

syscall_setpgid function #

go:linkname syscall_setpgid syscall.setpgid go:nosplit

func syscall_setpgid(pid uintptr, pgid uintptr) (err uintptr)

syscall_setpgid function #

go:nosplit go:linkname syscall_setpgid go:cgo_unsafe_args

func syscall_setpgid(pid uintptr, pgid uintptr) (err uintptr)

syscall_setrlimit function #

go:nosplit go:linkname syscall_setrlimit go:cgo_unsafe_args

func syscall_setrlimit(which uintptr, lim unsafe.Pointer) (err uintptr)

syscall_setrlimit1 function #

go:linkname syscall_setrlimit1 syscall.setrlimit1 go:nosplit

func syscall_setrlimit1(which uintptr, lim unsafe.Pointer) (err uintptr)

syscall_setsid function #

go:nosplit go:linkname syscall_setsid

func syscall_setsid() (pid uintptr, err uintptr)

syscall_setsid function #

go:linkname syscall_setsid syscall.setsid go:nosplit

func syscall_setsid() (pid uintptr, err uintptr)

syscall_setuid function #

go:linkname syscall_setuid syscall.setuid go:nosplit

func syscall_setuid(uid uintptr) (err uintptr)

syscall_setuid function #

go:nosplit go:linkname syscall_setuid

func syscall_setuid(uid uintptr) (err uintptr)

syscall_syscall function #

golang.org/x/sys linknames syscall_syscall (in addition to standard package syscall). Do not remove or change the type signature. go:linkname syscall_syscall syscall.syscall go:nosplit

func syscall_syscall(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_syscall function #

go:linkname syscall_syscall go:cgo_unsafe_args

func syscall_syscall(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_syscall function #

golang.org/x/sys linknames syscall_syscall (in addition to standard package syscall). Do not remove or change the type signature. go:linkname syscall_syscall syscall.syscall go:nosplit go:cgo_unsafe_args

func syscall_syscall(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_syscall10 function #

golang.org/x/sys linknames syscall.syscall10 (in addition to standard package syscall). Do not remove or change the type signature. go:linkname syscall_syscall10 syscall.syscall10 go:nosplit go:cgo_unsafe_args

func syscall_syscall10(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr, a7 uintptr, a8 uintptr, a9 uintptr, a10 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_syscall10X function #

go:linkname syscall_syscall10X syscall.syscall10X go:nosplit go:cgo_unsafe_args

func syscall_syscall10X(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr, a7 uintptr, a8 uintptr, a9 uintptr, a10 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_syscall6 function #

This is exported via linkname to assembly in the syscall package. go:nosplit go:cgo_unsafe_args go:linkname syscall_syscall6

func syscall_syscall6(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_syscall6 function #

golang.org/x/sys linknames syscall.syscall6 (in addition to standard package syscall). Do not remove or change the type signature. syscall.syscall6 is meant for package syscall (and x/sys), but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/tetratelabs/wazero See go.dev/issue/67401. go:linkname syscall_syscall6 syscall.syscall6 go:nosplit

func syscall_syscall6(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_syscall6 function #

golang.org/x/sys linknames syscall.syscall6 (in addition to standard package syscall). Do not remove or change the type signature. go:linkname syscall_syscall6 syscall.syscall6 go:nosplit go:cgo_unsafe_args

func syscall_syscall6(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_syscall6X function #

go:linkname syscall_syscall6X syscall.syscall6X go:nosplit go:cgo_unsafe_args

func syscall_syscall6X(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_syscall6X function #

go:linkname syscall_syscall6X syscall.syscall6X go:nosplit

func syscall_syscall6X(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_syscall9 function #

golang.org/x/sys linknames syscall.syscall9 (in addition to standard package syscall). Do not remove or change the type signature. go:linkname syscall_syscall9 syscall.syscall9 go:nosplit go:cgo_unsafe_args

func syscall_syscall9(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr, a7 uintptr, a8 uintptr, a9 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_syscallPtr function #

golang.org/x/sys linknames syscall.syscallPtr (in addition to standard package syscall). Do not remove or change the type signature. go:linkname syscall_syscallPtr syscall.syscallPtr go:nosplit

func syscall_syscallPtr(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_syscallX function #

go:linkname syscall_syscallX syscall.syscallX go:nosplit go:cgo_unsafe_args

func syscall_syscallX(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_syscallX function #

go:linkname syscall_syscallX syscall.syscallX go:nosplit

func syscall_syscallX(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_syscalln function #

go:nosplit

func syscall_syscalln(fn uintptr, n uintptr, args ...uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_sysvicall6 function #

go:nosplit go:linkname syscall_sysvicall6 go:cgo_unsafe_args

func syscall_sysvicall6(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)

syscall_wait4 function #

go:linkname syscall_wait4 go:cgo_unsafe_args

func syscall_wait4(pid uintptr, wstatus *uint32, options uintptr, rusage unsafe.Pointer) (wpid int, err uintptr)

syscall_write function #

go:nosplit go:linkname syscall_write go:cgo_unsafe_args

func syscall_write(fd uintptr, buf uintptr, nbyte uintptr) (n uintptr, err uintptr)

syscall_write1 function #

go:linkname syscall_write1 syscall.write1 go:nosplit

func syscall_write1(fd uintptr, buf uintptr, nbyte uintptr) (n uintptr, err uintptr)

syscall_x509 function #

func syscall_x509()

sysconf function #

go:nosplit

func sysconf(name int32) uintptr

sysconf function #

func sysconf(name int32) int64

sysctl function #

go:noescape

func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32

sysctl function #

go:noescape

func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32

sysctl function #

go:noescape

func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32

sysctl function #

go:noescape

func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32

sysctl function #

go:nosplit go:cgo_unsafe_args

func sysctl(mib *uint32, miblen uint32, oldp *byte, oldlenp *uintptr, newp *byte, newlen uintptr) int32

sysctl function #

go:nosplit go:cgo_unsafe_args

func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32

sysctlInt function #

func sysctlInt(mib []uint32) (int32, bool)

sysctlInt function #

func sysctlInt(mib []uint32) (int32, bool)

sysctlUint64 function #

func sysctlUint64(mib []uint32) (uint64, bool)

sysctl_trampoline function #

func sysctl_trampoline()

sysctl_trampoline function #

func sysctl_trampoline()

sysctlbyname function #

go:nosplit go:cgo_unsafe_args

func sysctlbyname(name *byte, oldp *byte, oldlenp *uintptr, newp *byte, newlen uintptr) int32

sysctlbynameInt32 function #

func sysctlbynameInt32(name []byte) (int32, int32)

sysctlbyname_trampoline function #

func sysctlbyname_trampoline()

sysctlnametomib function #

sysctlnametomib fills mib with the dynamically assigned sysctl entries for name and returns the count of affected mib slots, or 0 on error.

func sysctlnametomib(name []byte, mib *[_CTL_MAXNAME]uint32) uint32

sysmon function #

Always runs without a P, so write barriers are not allowed. go:nowritebarrierrec

func sysmon()

sysrand_fatal function #

go:linkname sysrand_fatal crypto/internal/sysrand.fatal

func sysrand_fatal(s string)

systemstack function #

systemstack runs fn on a system stack. If systemstack is called from the per-OS-thread (g0) stack, or if systemstack is called from the signal handling (gsignal) stack, systemstack calls fn directly and returns. Otherwise, systemstack is being called from the limited stack of an ordinary goroutine. In this case, systemstack switches to the per-OS-thread stack, calls fn, and switches back. It is common to use a func literal as the argument, in order to share inputs and outputs with the code around the call to system stack: ... set up y ... systemstack(func() { x = bigcall(y) }) ... use x ... go:noescape

func systemstack(fn func())

systemstack_switch function #

func systemstack_switch()

sysvicall0 function #

go:nosplit

func sysvicall0(fn *libcFunc) uintptr

sysvicall1 function #

go:nosplit

func sysvicall1(fn *libcFunc, a1 uintptr) uintptr

sysvicall1Err function #

sysvicall1Err returns both the system call result and the errno value. This is used by sysvicall1 and pipe. go:nosplit

func sysvicall1Err(fn *libcFunc, a1 uintptr) (r1 uintptr, err uintptr)

sysvicall2 function #

go:nosplit

func sysvicall2(fn *libcFunc, a1 uintptr, a2 uintptr) uintptr

sysvicall2Err function #

sysvicall2Err returns both the system call result and the errno value. This is used by sysvicall2 and pipe2.

func sysvicall2Err(fn *libcFunc, a1 uintptr, a2 uintptr) (uintptr, uintptr)

sysvicall3 function #

go:nosplit

func sysvicall3(fn *libcFunc, a1 uintptr, a2 uintptr, a3 uintptr) uintptr

sysvicall3Err function #

sysvicall3Err returns both the system call result and the errno value. This is used by sysvicall3 and write1.

func sysvicall3Err(fn *libcFunc, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, err uintptr)

sysvicall4 function #

go:nosplit go:cgo_unsafe_args

func sysvicall4(fn *libcFunc, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr) uintptr

sysvicall5 function #

go:nosplit go:cgo_unsafe_args

func sysvicall5(fn *libcFunc, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr) uintptr

sysvicall6 function #

go:nosplit go:cgo_unsafe_args

func sysvicall6(fn *libcFunc, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) uintptr

t0 method #

func (c *sigctxt) t0() uint64

t0 method #

func (c *sigctxt) t0() uint64

t0 method #

func (c *sigctxt) t0() uint64

t1 method #

func (c *sigctxt) t1() uint64

t1 method #

func (c *sigctxt) t1() uint64

t1 method #

func (c *sigctxt) t1() uint64

t2 method #

func (c *sigctxt) t2() uint64

t2 method #

func (c *sigctxt) t2() uint64

t2 method #

func (c *sigctxt) t2() uint64

t3 method #

func (c *sigctxt) t3() uint64

t3 method #

func (c *sigctxt) t3() uint64

t3 method #

func (c *sigctxt) t3() uint64

t4 method #

func (c *sigctxt) t4() uint64

t4 method #

func (c *sigctxt) t4() uint64

t4 method #

func (c *sigctxt) t4() uint64

t5 method #

func (c *sigctxt) t5() uint64

t5 method #

func (c *sigctxt) t5() uint64

t5 method #

func (c *sigctxt) t5() uint64

t6 method #

func (c *sigctxt) t6() uint64

t6 method #

func (c *sigctxt) t6() uint64

t6 method #

func (c *sigctxt) t6() uint64

tag method #

Tag returns the tag from a taggedPointer.

func (tp taggedPointer) tag() uintptr

tag method #

Tag returns the tag from a taggedPointer.

func (tp taggedPointer) tag() uintptr

tagCount method #

func (x profIndex) tagCount() uint32

taggedPointerPack function #

taggedPointerPack creates a taggedPointer from a pointer and a tag. Tag bits that don't fit in the result are discarded.

func taggedPointerPack(ptr unsafe.Pointer, tag uintptr) taggedPointer

taggedPointerPack function #

taggedPointerPack creates a taggedPointer from a pointer and a tag. Tag bits that don't fit in the result are discarded.

func taggedPointerPack(ptr unsafe.Pointer, tag uintptr) taggedPointer

tail method #

tail returns the tail of a headTailIndex value.

func (h headTailIndex) tail() uint32

take method #

take moves any timers from src into ts and then clears the timer state from src, because src is being destroyed. The caller must not have locked either timers. For now this is only called when the world is stopped.

func (ts *timers) take(src *timers)

takeAll method #

takeAll removes all spans from other and inserts them at the front of list.

func (list *mSpanList) takeAll(other *mSpanList)

takeFromBack method #

takeFromBack takes len bytes from the end of the address range, aligning the limit to align after subtracting len. On success, returns the aligned start of the region taken and true.

func (a *addrRange) takeFromBack(len uintptr, align uint8) (uintptr, bool)

takeFromFront method #

takeFromFront takes len bytes from the front of the address range, aligning the base to align first. On success, returns the aligned start of the region taken and true.

func (a *addrRange) takeFromFront(len uintptr, align uint8) (uintptr, bool)

takeOverflow method #

takeOverflow consumes the pending overflow records, returning the overflow count and the time of the first overflow. When called by the reader, it is racing against incrementOverflow.

func (b *profBuf) takeOverflow() (count uint32, time uint64)

templateThread function #

templateThread is a thread in a known-good state that exists solely to start new threads in known-good states when the calling thread may not be in a good state. Many programs never need this, so templateThread is started lazily when we first enter a state that might lead to running on a thread in an unknown state. templateThread runs on an M without a P, so it must not have write barriers. go:nowritebarrierrec

func templateThread()

test method #

test reports whether the trigger condition is satisfied, meaning that the exit condition for the _GCoff phase has been met. The exit condition should be tested when allocating.

func (t gcTrigger) test() bool

testAtomic64 function #

func testAtomic64()

testSPWrite function #

func testSPWrite()

testSPWrite function #

func testSPWrite()

textAddr method #

textAddr returns md.text + off, with special handling for multiple text sections. off is a (virtual) offset computed at internal linking time, before the external linker adjusts the sections' base addresses. The text, or instruction stream is generated as one large buffer. The off (offset) for a function is its offset within this buffer. If the total text size gets too large, there can be issues on platforms like ppc64 if the target of calls are too far for the call instruction. To resolve the large text issue, the text is split into multiple text sections to allow the linker to generate long calls when necessary. When this happens, the vaddr for each text section is set to its offset within the text. Each function's offset is compared against the section vaddrs and ends to determine the containing section. Then the section relative offset is added to the section's relocated baseaddr to compute the function address. It is nosplit because it is part of the findfunc implementation. go:nosplit

func (md *moduledata) textAddr(off32 uint32) uintptr

textOff method #

textOff is the opposite of textAddr. It converts a PC to a (virtual) offset to md.text, and returns if the PC is in any Go text section. It is nosplit because it is part of the findfunc implementation. go:nosplit

func (md *moduledata) textOff(pc uintptr) (uint32, bool)

textOff method #

func (t rtype) textOff(off textOff) unsafe.Pointer

tfork function #

go:noescape

func tfork(param *tforkt, psize uintptr, mm *m, gg *g, fn uintptr) int32

tgkill function #

func tgkill(tgid int, tid int, sig int)

thr_kill function #

func thr_kill(tid thread, sig int)

thr_new function #

go:noescape

func thr_new(param *thrparam, size int32) int32

thr_self function #

func thr_self() thread

thr_start function #

func thr_start()

threadCreateProfileInternal function #

threadCreateProfileInternal returns the number of records n in the profile. If there are fewer than size records, copyFn is invoked for each record, and ok returns true.

func threadCreateProfileInternal(size int, copyFn func(profilerecord.StackRecord)) (n int, ok bool)

thrkill function #

func thrkill(tid int32, sig int)

thrkill function #

go:nosplit go:cgo_unsafe_args

func thrkill(tid int32, sig int)

thrkill_trampoline function #

func thrkill_trampoline()

throw function #

throw triggers a fatal error that dumps a stack trace and exits. throw should be used for runtime-internal fatal errors where Go itself, rather than user code, may be at fault for the failure. throw should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/bytedance/sonic - github.com/cockroachdb/pebble - github.com/dgraph-io/ristretto - github.com/outcaste-io/ristretto - github.com/pingcap/br - gvisor.dev/gvisor - github.com/sagernet/gvisor Do not remove or change the type signature. See go.dev/issue/67401. go:linkname throw go:nosplit

func throw(s string)

thrsleep function #

go:noescape

func thrsleep(ident uintptr, clock_id int32, tsp *timespec, lock uintptr, abort *uint32) int32

thrsleep function #

go:nosplit go:cgo_unsafe_args

func thrsleep(ident uintptr, clock_id int32, tsp *timespec, lock uintptr, abort *uint32) int32

thrsleep_trampoline function #

func thrsleep_trampoline()

thrwakeup function #

go:noescape

func thrwakeup(ident uintptr, n int32) int32

thrwakeup function #

go:nosplit go:cgo_unsafe_args

func thrwakeup(ident uintptr, n int32) int32

thrwakeup_trampoline function #

func thrwakeup_trampoline()

ticksPerSecond function #

ticksPerSecond returns a conversion rate between the cputicks clock and the nanotime clock. Note: Clocks are hard. Using this as an actual conversion rate for timestamps is ill-advised and should be avoided when possible. Use only for durations, where a tiny error term isn't going to make a meaningful difference in even a 1ms duration. If an accurate timestamp is needed, use nanotime instead. (The entire Windows platform is a broad exception to this rule, where nanotime produces timestamps on such a coarse granularity that the error from this conversion is actually preferable.) The strategy for computing the conversion rate is to write down nanotime and cputicks as early in process startup as possible. From then, we just need to wait until we get values from nanotime that we can use (some platforms have a really coarse system time granularity). We require some amount of time to pass to ensure that the conversion rate is fairly accurate in aggregate. But because we compute this rate lazily, there's a pretty good chance a decent amount of time has passed by the time we get here. Must be called from a normal goroutine context (running regular goroutine with a P). Called by runtime/pprof in addition to runtime code. TODO(mknyszek): This doesn't account for things like CPU frequency scaling. Consider a more sophisticated and general approach in the future.

func ticksPerSecond() int64

timeHistogramMetricsBuckets function #

timeHistogramMetricsBuckets generates a slice of boundaries for the timeHistogram. These boundaries are represented in seconds, not nanoseconds like the timeHistogram represents durations.

func timeHistogramMetricsBuckets() []float64

timeSleep function #

timeSleep puts the current goroutine to sleep for at least ns nanoseconds. go:linkname timeSleep time.Sleep

func timeSleep(ns int64)

timeSleepUntil function #

timeSleepUntil returns the time when the next timer should fire. Returns maxWhen if there are no timers. This is only called by sysmon and checkdead.

func timeSleepUntil() int64

time_now function #

time_now should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - gitee.com/quant1x/gox - github.com/phuslu/log - github.com/sethvargo/go-limiter - github.com/ulule/limiter/v3 Do not remove or change the type signature. See go.dev/issue/67401. go:linkname time_now time.now

func time_now() (sec int64, nsec int32, mono int64)

time_now function #

go:linkname time_now time.now

func time_now() (sec int64, nsec int32, mono int64)

time_now function #

go:linkname time_now time.now

func time_now() (sec int64, nsec int32, mono int64)

time_runtimeNano function #

go:linkname time_runtimeNano time.runtimeNano

func time_runtimeNano() int64

time_runtimeNow function #

go:linkname time_runtimeNow time.runtimeNow

func time_runtimeNow() (sec int64, nsec int32, mono int64)

timediv function #

Poor man's 64-bit division. This is a very special function, do not use it if you are not sure what you are doing. int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions. Handles overflow in a time-specific manner. This keeps us within no-split stack limits on 32-bit processors. go:nosplit

func timediv(v int64, div int32, rem *int32) int32

timer_create function #

go:noescape

func timer_create(clockid int32, sevp *sigevent, timerid *int32) int32

timer_delete function #

go:noescape

func timer_delete(timerid int32) int32

timer_settime function #

go:noescape

func timer_settime(timerid int32, flags int32, new *itimerspec, old *itimerspec) int32

timerchandrain function #

timerchandrain removes all elements in channel c's buffer. It reports whether any elements were removed. Because it is only intended for timers, it does not handle waiting senders at all (all timer channels use non-blocking sends to fill the buffer).

func timerchandrain(c *hchan) bool

tlsinit function #

tlsinit allocates a thread-local storage slot for g. It finds the first available slot using pthread_key_create and uses it as the offset value for runtime.tlsg. This runs at startup on g0 stack, but before g is set, so it must not split stack (transitively). g is expected to be nil, so things (e.g. asmcgocall) will skip saving or reading g. go:nosplit

func tlsinit(tlsg *uintptr, tlsbase *[_PTHREAD_KEYS_MAX]uintptr)

toRType function #

func toRType(t *abi.Type) rtype

tooManyOverflowBuckets function #

tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1&lt;&lt;B buckets.

func tooManyOverflowBuckets(noverflow uint16, B uint8) bool

tophash function #

tophash calculates the tophash value for hash.

func tophash(hash uintptr) uint8

totalMutexWaitTimeNanos function #

func totalMutexWaitTimeNanos() int64

tp method #

func (c *sigctxt) tp() uint64

tp method #

func (c *sigctxt) tp() uint64

tp method #

func (c *sigctxt) tp() uint64

trace method #

func (t *timer) trace(op string)

trace method #

func (ts *timers) trace(op string)

trace1 method #

func (t *timer) trace1(op string)

traceAcquire function #

traceAcquire prepares this M for writing one or more trace events. nosplit because it's called on the syscall path when stack movement is forbidden. go:nosplit

func traceAcquire() traceLocker

traceAcquireEnabled function #

traceAcquireEnabled is the traceEnabled path for traceAcquire. It's explicitly broken out to make traceAcquire inlineable to keep the overhead of the tracer when it's disabled low. nosplit because it's called by traceAcquire, which is nosplit. go:nosplit

func traceAcquireEnabled() traceLocker

traceAdvance function #

traceAdvance moves tracing to the next generation, and cleans up the current generation, ensuring that it's flushed out before returning. If stopTrace is true, it disables tracing altogether instead of advancing to the next generation. traceAdvanceSema must not be held. traceAdvance is called by golang.org/x/exp/trace using linkname. go:linkname traceAdvance

func traceAdvance(stopTrace bool)

traceAllocFreeEnabled function #

traceAllocFreeEnabled returns true if the trace is currently enabled and alloc/free events are also enabled. go:nosplit

func traceAllocFreeEnabled() bool

traceBufFlush function #

traceBufFlush flushes a trace buffer. Must run on the system stack because trace.lock must be held. go:systemstack

func traceBufFlush(buf *traceBuf, gen uintptr)

traceCPUFlush function #

traceCPUFlush flushes trace.cpuBuf[gen%2]. The caller must be certain that gen has completed and that there are no more writers to it.

func traceCPUFlush(gen uintptr)

traceCPUSample function #

traceCPUSample writes a CPU profile sample stack to the execution tracer's profiling buffer. It is called from a signal handler, so is limited in what it can do. mp must be the thread that is currently stopped in a signal.

func traceCPUSample(gp *g, mp *m, pp *p, stk []uintptr)

traceClockNow function #

traceClockNow returns a monotonic timestamp. The clock this function gets the timestamp from is specific to tracing, and shouldn't be mixed with other clock sources. nosplit because it's called from exitsyscall and various trace writing functions, which are nosplit. traceClockNow is called by golang.org/x/exp/trace using linkname. go:linkname traceClockNow go:nosplit

func traceClockNow() traceTime

traceClockUnitsPerSecond function #

traceClockUnitsPerSecond estimates the number of trace clock units per second that elapse.

func traceClockUnitsPerSecond() uint64

traceCompressStackSize function #

traceCompressStackSize assumes size is a power of 2 and returns log2(size).

func traceCompressStackSize(size uintptr) traceArg

traceEnabled function #

traceEnabled returns true if the trace is currently enabled. go:nosplit

func traceEnabled() bool

traceExitedSyscall function #

traceExitedSyscall marks a goroutine as having exited the syscall slow path.

func traceExitedSyscall()

traceExitingSyscall function #

traceExitingSyscall marks a goroutine as exiting the syscall slow path. Must be paired with a traceExitedSyscall call.

func traceExitingSyscall()

traceFrequency function #

traceFrequency writes a batch with a single EvFrequency event. freq is the number of trace clock units per second.

func traceFrequency(gen uintptr)

traceGoroutineStackID function #

traceGoroutineStackID creates a trace ID for the goroutine stack from its base address.

func traceGoroutineStackID(base uintptr) traceArg

traceHeapObjectID function #

traceHeapObjectID creates a trace ID for a heap object at address addr.

func traceHeapObjectID(addr uintptr) traceArg

traceInitReadCPU function #

traceInitReadCPU initializes CPU profile -> tracer state for tracing. Returns a profBuf for reading from.

func traceInitReadCPU()

traceLockInit function #

traceLockInit initializes global trace locks.

func traceLockInit()

traceNextGen function #

func traceNextGen(gen uintptr) uintptr

traceReadCPU function #

traceReadCPU attempts to read from the provided profBuf[gen%2] and write into the trace. Returns true if there might be more to read or false if the profBuf is closed or the caller should otherwise stop reading. The caller is responsible for ensuring that gen does not change. Either the caller must be in a traceAcquire/traceRelease block, or must be calling with traceAdvanceSema held. No more than one goroutine may be in traceReadCPU for the same profBuf at a time. Must not run on the system stack because profBuf.read performs race operations.

func traceReadCPU(gen uintptr) bool

traceReader function #

traceReader returns the trace reader that should be woken up, if any. Callers should first check (traceEnabled() || traceShuttingDown()). This must run on the system stack because it acquires trace.lock. go:systemstack

func traceReader() *g

traceReaderAvailable function #

traceReaderAvailable returns the trace reader if it is not currently scheduled and should be. Callers should first check that (traceEnabled() || traceShuttingDown()) is true.

func traceReaderAvailable() *g

traceRegisterLabelsAndReasons function #

traceRegisterLabelsAndReasons re-registers mark worker labels and goroutine stop/block reasons in the string table for the provided generation. Note: the provided generation must not have started yet.

func traceRegisterLabelsAndReasons(gen uintptr)

traceRelease function #

traceRelease indicates that this M is done writing trace events. nosplit because it's called on the syscall path when stack movement is forbidden. go:nosplit

func traceRelease(tl traceLocker)

traceShuttingDown function #

traceShuttingDown returns true if the trace is currently shutting down.

func traceShuttingDown() bool

traceSnapshotMemory function #

traceSnapshotMemory takes a snapshot of all runtime memory that there are events for (heap spans, heap objects, goroutine stacks, etc.) and writes out events for them. The world must be stopped and tracing must be enabled when this function is called.

func traceSnapshotMemory(gen uintptr)

traceSpanID function #

traceSpanID creates a trace ID for the span s for the trace.

func traceSpanID(s *mspan) traceArg

traceSpanTypeAndClass function #

func traceSpanTypeAndClass(s *mspan) traceArg

traceStack function #

traceStack captures a stack trace from a goroutine and registers it in the trace stack table. It then returns its unique ID. If gp == nil, then traceStack will attempt to use the current execution context. skip controls the number of leaf frames to omit in order to hide tracer internals from stack traces, see CL 5523. Avoid calling this function directly. gen needs to be the current generation that this stack trace is being written out for, which needs to be synchronized with generations moving forward. Prefer traceEventWriter.stack.

func traceStack(skip int, gp *g, gen uintptr) uint64

traceStartReadCPU function #

traceStartReadCPU creates a goroutine to start reading CPU profile data into an active trace. traceAdvanceSema must be held.

func traceStartReadCPU()

traceStopReadCPU function #

traceStopReadCPU blocks until the trace CPU reading goroutine exits. traceAdvanceSema must be held, and tracing must be disabled.

func traceStopReadCPU()

traceThreadDestroy function #

traceThreadDestroy is called when a thread is removed from sched.freem. mp must not be able to emit trace events anymore. sched.lock must be held to synchronize with traceAdvance.

func traceThreadDestroy(mp *m)

trace_userLog function #

trace_userLog emits a UserLog event. go:linkname trace_userLog runtime/trace.userLog

func trace_userLog(id uint64, category string, message string)

trace_userRegion function #

trace_userRegion emits a UserRegionBegin or UserRegionEnd event, depending on mode (0 == Begin, 1 == End). TODO(mknyszek): Just make this two functions. go:linkname trace_userRegion runtime/trace.userRegion

func trace_userRegion(id uint64, mode uint64, name string)

trace_userTaskCreate function #

trace_userTaskCreate emits a UserTaskCreate event. go:linkname trace_userTaskCreate runtime/trace.userTaskCreate

func trace_userTaskCreate(id uint64, parentID uint64, taskType string)

trace_userTaskEnd function #

trace_userTaskEnd emits a UserTaskEnd event. go:linkname trace_userTaskEnd runtime/trace.userTaskEnd

func trace_userTaskEnd(id uint64)

traceback method #

go:nosplit

func (l *dloggerImpl) traceback(x []uintptr) *dloggerImpl

traceback function #

func traceback(pc uintptr, sp uintptr, lr uintptr, gp *g)

traceback method #

go:nosplit

func (l dloggerFake) traceback(x []uintptr) dloggerFake

traceback1 function #

func traceback1(pc uintptr, sp uintptr, lr uintptr, gp *g, flags unwindFlags)

traceback2 function #

traceback2 prints a stack trace starting at u. It skips the first "skip" logical frames, after which it prints at most "max" logical frames. It returns n, which is the number of logical frames skipped and printed, and lastN, which is the number of logical frames skipped or printed just in the physical frame that u references.

func traceback2(u *unwinder, showRuntime bool, skip int, max int) (n int, lastN int)

tracebackHexdump function #

tracebackHexdump hexdumps part of stk around frame.sp and frame.fp for debugging purposes. If the address bad is included in the hexdumped range, it will mark it as well.

func tracebackHexdump(stk stack, frame *stkframe, bad uintptr)

tracebackPCs function #

tracebackPCs populates pcBuf with the return addresses for each frame from u and returns the number of PCs written to pcBuf. The returned PCs correspond to "logical frames" rather than "physical frames"; that is if A is inlined into B, this will still return PCs for both A and B. This also includes PCs generated by the cgo unwinder, if one is registered. If skip != 0, this skips this many logical frames. Callers should set the unwindSilentErrors flag on u.

func tracebackPCs(u *unwinder, skip int, pcBuf []uintptr) int

tracebackothers function #

func tracebackothers(me *g)

tracebacktrap function #

tracebacktrap is like traceback but expects that the PC and SP were obtained from a trap, not from gp->sched or gp->syscallpc/gp->syscallsp or GetCallerPC/GetCallerSP. Because they are from a trap instead of from a saved pair, the initial PC must not be rewound to the previous instruction. (All the saved pairs record a PC that is a return address, so we rewind it into the CALL instruction.) If gp.m.libcall{g,pc,sp} information is available, it uses that information in preference to the pc/sp/lr passed in.

func tracebacktrap(pc uintptr, sp uintptr, lr uintptr, gp *g)

tracefpunwindoff function #

tracefpunwindoff returns true if frame pointer unwinding for the tracer is disabled via GODEBUG or not supported by the architecture.

func tracefpunwindoff() bool

trap method #

func (c *sigctxt) trap() uint64

trap method #

TODO(aix): find trap equivalent

func (c *sigctxt) trap() uint32

trap method #

func (c *sigctxt) trap() uint64

trap method #

func (c *sigctxt) trap() uint32

trap method #

func (c *sigctxt) trap() uint32

trap method #

func (c *sigctxt) trap() uint32

trap method #

func (c *sigctxt) trap() uint64

trap method #

func (c *sigctxt) trap() uint32

trigger method #

trigger returns the current point at which a GC should trigger along with the heap goal. The returned value may be compared against heapLive to determine whether the GC should trigger. Thus, the GC trigger condition should be (but may not be, in the case of small movements for efficiency) checked whenever the heap goal may change.

func (c *gcControllerState) trigger() (uint64, uint64)

tryAcquire method #

tryAcquire attempts to acquire sweep ownership of span s. If it successfully acquires ownership, it blocks sweep completion.

func (l *sweepLocker) tryAcquire(s *mspan) (sweepLocked, bool)

tryAlloc method #

tryAlloc allocates from b or returns nil if b does not have enough room. This is safe to call concurrently.

func (b *gcBitsArena) tryAlloc(bytes uintptr) *gcBits

tryAllocMSpan method #

tryAllocMSpan attempts to allocate an mspan object from the P-local cache, but may fail. h.lock need not be held. This caller must ensure that its P won't change underneath it during this function. Currently to ensure that we enforce that the function is run on the system stack, because that's the only place it is used now. In the future, this requirement may be relaxed if its use is necessary elsewhere. go:systemstack

func (h *mheap) tryAllocMSpan() *mspan

tryChunkOf method #

tryChunkOf returns the bitmap data for the given chunk. Returns nil if the chunk data has not been mapped.

func (p *pageAlloc) tryChunkOf(ci chunkIdx) *pallocData

tryGet method #

tryGet dequeues a pointer for the garbage collector to trace. If there are no pointers remaining in this gcWork or in the global queue, tryGet returns 0. Note that there may still be pointers in other gcWork instances or other caches. go:nowritebarrierrec

func (w *gcWork) tryGet() uintptr

tryGetFast method #

tryGetFast dequeues a pointer for the garbage collector to trace if one is readily available. Otherwise it returns 0 and the caller is expected to call tryGet(). go:nowritebarrierrec

func (w *gcWork) tryGetFast() uintptr

tryLock method #

tryLock attempts to lock l. Returns true on success.

func (l *gcCPULimiterState) tryLock() bool

tryMerge method #

func (a *abiPart) tryMerge(b abiPart) bool

tryRecordGoroutineProfile function #

tryRecordGoroutineProfile ensures that gp1 has the appropriate representation in the current goroutine profile: either that it should not be profiled, or that a snapshot of its call stack and labels are now in the profile.

func tryRecordGoroutineProfile(gp1 *g, pcbuf []uintptr, yield func())

tryRecordGoroutineProfileWB function #

tryRecordGoroutineProfileWB asserts that write barriers are allowed and calls tryRecordGoroutineProfile. go:yeswritebarrierrec

func tryRecordGoroutineProfileWB(gp1 *g)

tryRegAssignArg method #

tryRegAssignArg tries to register-assign a value of type t. If this type is nested in an aggregate type, then offset is the offset of this type within its parent type. Assumes t.size <= goarch.PtrSize and t.size != 0. Returns whether the assignment succeeded.

func (p *abiDesc) tryRegAssignArg(t *_type, offset uintptr) bool

trygetfull function #

trygetfull tries to get a full or partially empty workbuffer. If one is not immediately available return nil. go:nowritebarrier

func trygetfull() *workbuf

tstart_plan9 function #

go:noescape

func tstart_plan9(newm *m)

tstart_stdcall function #

Function to be called by Windows CreateThread to start a new OS thread.

func tstart_stdcall(newm *m)

tstart_sysvicall function #

func tstart_sysvicall(newm *m) uint32

typ method #

typ extracts the event type from the stamp.

func (s limiterEventStamp) typ() limiterEventType

typeAssert function #

typeAssert builds an itab for the concrete type t and the interface type s.Inter. If the conversion is not possible it panics if s.CanFail is false and returns nil if s.CanFail is true.

func typeAssert(s *abi.TypeAssert, t *_type) *itab

typeBitsBulkBarrier function #

typeBitsBulkBarrier executes a write barrier for every pointer that would be copied from [src, src+size) to [dst, dst+size) by a memmove using the type bitmap to locate those pointer slots. The type typ must correspond exactly to [src, src+size) and [dst, dst+size). dst, src, and size must be pointer-aligned. Must not be preempted because it typically runs right before memmove, and the GC must observe them as an atomic action. Callers must perform cgo checks if goexperiment.CgoCheck2. go:nosplit

func typeBitsBulkBarrier(typ *_type, dst uintptr, src uintptr, size uintptr)

typeOff method #

func (t rtype) typeOff(off typeOff) *_type

typePointersOf method #

typePointersOf returns an iterator over all heap pointers in the range [addr, addr+size). addr and addr+size must be in the range [span.base(), span.limit). Note: addr+size must be passed as the limit argument to the iterator's next method on each iteration. This slightly awkward API is to allow typePointers to be destructured by the compiler. nosplit because it is used during write barriers and must not be preempted. go:nosplit

func (span *mspan) typePointersOf(addr uintptr, size uintptr) typePointers

typePointersOfType method #

typePointersOfType is like typePointersOf, but assumes addr points to one or more contiguous instances of the provided type. The provided type must not be nil. It returns an iterator that tiles typ's gcmask starting from addr. It's the caller's responsibility to limit iteration. nosplit because its callers are nosplit and require all their callees to be nosplit. go:nosplit

func (span *mspan) typePointersOfType(typ *abi.Type, addr uintptr) typePointers

typePointersOfUnchecked method #

typePointersOfUnchecked is like typePointersOf, but assumes addr is the base of an allocation slot in a span (the start of the object if no header, the header otherwise). It returns an iterator that generates all pointers in the range [addr, addr+span.elemsize). nosplit because it is used during write barriers and must not be preempted. go:nosplit

func (span *mspan) typePointersOfUnchecked(addr uintptr) typePointers

typedmemclr function #

typedmemclr clears the typed memory at ptr with type typ. The memory at ptr must already be initialized (and hence in type-safe state). If the memory is being initialized for the first time, see memclrNoHeapPointers. If the caller knows that typ has pointers, it can alternatively call memclrHasPointers. TODO: A "go:nosplitrec" annotation would be perfect for this. go:nosplit

func typedmemclr(typ *_type, ptr unsafe.Pointer)

typedmemmove function #

typedmemmove copies a value of type typ to dst from src. Must be nosplit, see #16026. TODO: Perfect for go:nosplitrec since we can't have a safe point anywhere in the bulk barrier or memmove. typedmemmove should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/RomiChan/protobuf - github.com/segmentio/encoding Do not remove or change the type signature. See go.dev/issue/67401. go:linkname typedmemmove go:nosplit

func typedmemmove(typ *abi.Type, dst unsafe.Pointer, src unsafe.Pointer)

typedslicecopy function #

typedslicecopy should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/segmentio/encoding Do not remove or change the type signature. See go.dev/issue/67401. go:linkname typedslicecopy go:nosplit

func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe.Pointer, srcLen int) int

typehash function #

typehash computes the hash of the object of type t at address p. h is the seed. This function is seldom used. Most maps use for hashing either fixed functions (e.g. f32hash) or compiler-generated functions (e.g. for a type like struct { x, y string }). This implementation is slower but more general and is used for hashing interface types (called from interhash or nilinterhash, above) or for hashing in maps generated by reflect.MapOf (reflect_typehash, below). Note: this function must match the compiler generated functions exactly. See issue 37716. typehash should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - github.com/puzpuzpuz/xsync/v2 - github.com/puzpuzpuz/xsync/v3 Do not remove or change the type signature. See go.dev/issue/67401. go:linkname typehash

func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr

typelinksinit function #

typelinksinit scans the types from extra modules and builds the moduledata typemap used to de-duplicate type pointers.

func typelinksinit()

typesEqual function #

typesEqual reports whether two types are equal. Everywhere in the runtime and reflect packages, it is assumed that there is exactly one *_type per Go type, so that pointer equality can be used to test if types are equal. There is one place that breaks this assumption: buildmode=shared. In this case a type can appear as two different pieces of memory. This is hidden from the runtime and reflect package by the per-module typemap built in typelinksinit. It uses typesEqual to map types from later modules back into earlier ones. Only typelinksinit needs this function.

func typesEqual(t *_type, v *_type, seen map[_typePair]struct{...}) bool

u method #

go:nosplit

func (l *dloggerImpl) u(x uint) *dloggerImpl

u method #

go:nosplit

func (l dloggerFake) u(x uint) dloggerFake

u16 method #

go:nosplit

func (l *dloggerImpl) u16(x uint16) *dloggerImpl

u16 method #

go:nosplit

func (l dloggerFake) u16(x uint16) dloggerFake

u32 method #

go:nosplit

func (l dloggerFake) u32(x uint32) dloggerFake

u32 method #

go:nosplit

func (l *dloggerImpl) u32(x uint32) *dloggerImpl

u64 method #

go:nosplit

func (l dloggerFake) u64(x uint64) dloggerFake

u64 method #

go:nosplit

func (l *dloggerImpl) u64(x uint64) *dloggerImpl

u8 method #

go:nosplit

func (l dloggerFake) u8(x uint8) dloggerFake

u8 method #

go:nosplit

func (l *dloggerImpl) u8(x uint8) *dloggerImpl

udiv function #

Called from compiler-generated code; declared for go vet.

func udiv()

uint32tofloat64 function #

func uint32tofloat64(a uint32) float64

uint64div function #

func uint64div(n uint64, d uint64) uint64

uint64mod function #

func uint64mod(n uint64, d uint64) uint64

uint64tofloat32 function #

func uint64tofloat32(y uint64) float32

uint64tofloat64 function #

func uint64tofloat64(y uint64) float64

unblockTimerChan function #

unblockTimerChan is called when a channel op that was blocked on c is no longer blocked. Every call to blockTimerChan must be paired with a call to unblockTimerChan. The caller holds the channel lock for c and possibly other channels. unblockTimerChan removes c from the timer heap when nothing is blocked on it anymore.

func unblockTimerChan(c *hchan)

unblocksig function #

unblocksig removes sig from the current thread's signal mask. This is nosplit and nowritebarrierrec because it is called from dieFromSignal, which can be called by sigfwdgo while running in the signal handler, on the signal stack, with no g available. go:nosplit go:nowritebarrierrec

func unblocksig(sig uint32)

uncacheSpan method #

Return span from an mcache. s must have a span class corresponding to this mcentral and it must not be empty.

func (c *mcentral) uncacheSpan(s *mspan)

uncommon method #

func (t rtype) uncommon() *uncommontype

unimplemented function #

func unimplemented(name string)

union method #

union returns the union of the two sets as a new set.

func (s statDepSet) union(b statDepSet) statDepSet

uniqueString method #

uniqueString returns a traceArg representing s which may be passed to write. The string is assumed to be unique or long, so it will be written out to the trace eagerly.

func (tl traceLocker) uniqueString(s string) traceArg

unique_runtime_registerUniqueMapCleanup function #

go:linkname unique_runtime_registerUniqueMapCleanup unique.runtime_registerUniqueMapCleanup

func unique_runtime_registerUniqueMapCleanup(f func())

unlinkAndNext method #

unlinkAndNext removes the current special from the list and moves the iterator to the next special. It returns the unlinked special.

func (i *specialsIter) unlinkAndNext() *special

unlock method #

unlock updates t.astate and unlocks the timer.

func (t *timer) unlock()

unlock method #

unlock unlocks rw for writing.

func (rw *rwmutex) unlock()

unlock function #

func unlock(l *mutex)

unlock function #

func unlock(l *mutex)

unlock method #

func (ts *timers) unlock()

unlock method #

unlock releases the lock on l. Must be called if tryLock returns true.

func (l *gcCPULimiterState) unlock()

unlock function #

func unlock(l *mutex)

unlock function #

func unlock(l *mutex)

unlock function #

func unlock(l *mutex)

unlock2 function #

func unlock2(l *mutex)

unlock2 function #

func unlock2(l *mutex)

unlock2 function #

We might not be holding a p in this code. go:nowritebarrier

func unlock2(l *mutex)

unlock2 function #

We might not be holding a p in this code. go:nowritebarrier

func unlock2(l *mutex)

unlock2 function #

func unlock2(l *mutex)

unlock2Wake function #

unlock2Wake updates the list of Ms waiting on l, waking an M if necessary. go:nowritebarrier

func unlock2Wake(l *mutex)

unlockAndRun method #

unlockAndRun unlocks and runs the timer t (which must be locked). If t is in a timer set (t.ts != nil), the caller must also have locked the timer set, and this call will temporarily unlock the timer set while running the timer function. unlockAndRun returns with t unlocked and t.ts (re-)locked. go:systemstack

func (t *timer) unlockAndRun(now int64)

unlockOSThread function #

go:nosplit

func unlockOSThread()

unlockWithRank function #

func unlockWithRank(l *mutex)

unlockWithRank function #

See comment on lockWithRank regarding stack splitting.

func unlockWithRank(l *mutex)

unlockextra function #

go:nosplit

func unlockextra(mp *m, delta int32)

unminit function #

Called from dropm to undo the effect of an minit.

func unminit()

unminit function #

Called from dropm to undo the effect of an minit. go:nosplit

func unminit()

unminit function #

Called from dropm to undo the effect of an minit.

func unminit()

unminit function #

Called from dropm to undo the effect of an minit.

func unminit()

unminit function #

Called from dropm to undo the effect of an minit. go:nosplit

func unminit()

unminit function #

Called from dropm to undo the effect of an minit. go:nosplit

func unminit()

unminit function #

Called from dropm to undo the effect of an minit. go:nosplit

func unminit()

unminit function #

Called from dropm to undo the effect of an minit. go:nosplit

func unminit()

unminit function #

Called from dropm to undo the effect of an minit. go:nosplit

func unminit()

unminit function #

Called from dropm to undo the effect of an minit. go:nosplit

func unminit()

unminit function #

func unminit()

unminitSignals function #

unminitSignals is called from dropm, via unminit, to undo the effect of calling minit on a non-Go thread. go:nosplit

func unminitSignals()

unpack method #

unpack unpacks all three values from the summary.

func (p pallocSum) unpack() (uint, uint, uint)

unpackNetpollSource function #

unpackNetpollSource returns the source packed into the key.

func unpackNetpollSource(key uintptr) uint8

unpackScavChunkData function #

unpackScavChunkData unpacks a scavChunkData from a uint64.

func unpackScavChunkData(sc uint64) scavChunkData

unpin method #

func (p *pinner) unpin()

unreachableMethod function #

The linker redirects a reference of a method that it determined unreachable to a reference to this function, so it will throw if ever called.

func unreachableMethod()

unsafeClear method #

unsafeClear clears the shard. Unsafe because the world must be stopped and values should be donated elsewhere before clearing.

func (m *consistentHeapStats) unsafeClear()

unsafeRead method #

unsafeRead aggregates the delta for this shard into out. Unsafe because it does so without any synchronization. The world must be stopped.

func (m *consistentHeapStats) unsafeRead(out *heapStatsDelta)

unsafeTraceExpWriter function #

unsafeTraceExpWriter produces a traceWriter for experimental trace batches that doesn't lock the trace. Data written to experimental batches need not conform to the standard trace format. It should only be used in contexts where either: - Another traceLocker is held. - trace.gen is prevented from advancing. This does not have the same stack growth restrictions as traceLocker.writer. buf may be nil.

func unsafeTraceExpWriter(gen uintptr, buf *traceBuf, exp traceExperiment) traceWriter

unsafeTraceWriter function #

unsafeTraceWriter produces a traceWriter that doesn't lock the trace. It should only be used in contexts where either: - Another traceLocker is held. - trace.gen is prevented from advancing. This does not have the same stack growth restrictions as traceLocker.writer. buf may be nil.

func unsafeTraceWriter(gen uintptr, buf *traceBuf) traceWriter

unsafeslice function #

Keep this code in sync with cmd/compile/internal/walk/builtin.go:walkUnsafeSlice

func unsafeslice(et *_type, ptr unsafe.Pointer, len int)

unsafeslice64 function #

Keep this code in sync with cmd/compile/internal/walk/builtin.go:walkUnsafeSlice

func unsafeslice64(et *_type, ptr unsafe.Pointer, len64 int64)

unsafeslicecheckptr function #

func unsafeslicecheckptr(et *_type, ptr unsafe.Pointer, len64 int64)

unsafestring function #

func unsafestring(ptr unsafe.Pointer, len int)

unsafestring64 function #

Keep this code in sync with cmd/compile/internal/walk/builtin.go:walkUnsafeString

func unsafestring64(ptr unsafe.Pointer, len64 int64)

unsafestringcheckptr function #

func unsafestringcheckptr(ptr unsafe.Pointer, len64 int64)

unsetenv_c function #

Update the C environment if cgo is loaded.

func unsetenv_c(k string)

unspillArgs function #

func unspillArgs()

unspillArgs function #

func unspillArgs()

unspillArgs function #

func unspillArgs()

unspillArgs function #

func unspillArgs()

unspillArgs function #

func unspillArgs()

unwindm function #

func unwindm(restore *bool)

update method #

update updates heap metadata. It must be called each time the bitmap is updated. If contig is true, update does some optimizations assuming that there was a contiguous allocation or free between addr and addr+npages. alloc indicates whether the operation performed was an allocation or a free. p.mheapLock must be held.

func (p *pageAlloc) update(base uintptr, npages uintptr, contig bool, alloc bool)

update method #

update updates the bucket given runtime-specific information. now is the current monotonic time in nanoseconds. This is safe to call concurrently with other operations, except *GCTransition.

func (l *gcCPULimiterState) update(now int64)

update method #

func (s *sweepClass) update(sNew sweepClass)

update method #

func (c *gcControllerState) update(dHeapLive int64, dHeapScan int64)

updateHeap method #

updateHeap updates t as directed by t.state, updating t.state and returning a bool indicating whether the state (and ts.heap[0].when) changed. The caller must hold t's lock, or the world can be stopped instead. The timer set t.ts must be non-nil and locked, t must be t.ts.heap[0], and updateHeap takes care of moving t within the timers heap to preserve the heap invariants. If ts == nil, then t must not be in a heap (or is in a heap that is temporarily not maintaining its invariant, such as during timers.adjust).

func (t *timer) updateHeap() (updated bool)

updateLocked method #

updateLocked is the implementation of update. l.lock must be held.

func (l *gcCPULimiterState) updateLocked(now int64)

updateMinWhenHeap method #

updateMinWhenHeap sets ts.minWhenHeap to ts.heap[0].when. The caller must have locked ts or the world must be stopped.

func (ts *timers) updateMinWhenHeap()

updateMinWhenModified method #

updateMinWhenModified updates ts.minWhenModified to be <= when. ts need not be (and usually is not) locked.

func (ts *timers) updateMinWhenModified(when int64)

uptr method #

go:nosplit

func (l dloggerFake) uptr(x uintptr) dloggerFake

uptr method #

go:nosplit

func (l *dloggerImpl) uptr(x uintptr) *dloggerImpl

userArenaChunkReserveBytes function #

userArenaChunkReserveBytes returns the amount of additional bytes to reserve for heap metadata.

func userArenaChunkReserveBytes() uintptr

userArenaHeapBitsSetSliceType function #

userArenaHeapBitsSetSliceType is the equivalent of heapBitsSetType but for Go slice backing store values allocated in a user arena chunk. It sets up the heap bitmap for n consecutive values with type typ allocated at address ptr.

func userArenaHeapBitsSetSliceType(typ *_type, n int, ptr unsafe.Pointer, s *mspan)

userArenaHeapBitsSetType function #

userArenaHeapBitsSetType is the equivalent of heapSetType but for non-slice-backing-store Go values allocated in a user arena chunk. It sets up the type metadata for the value with type typ allocated at address ptr. base is the base address of the arena chunk.

func userArenaHeapBitsSetType(typ *_type, ptr unsafe.Pointer, s *mspan)

userArenaNextFree method #

userArenaNextFree reserves space in the user arena for an item of the specified type. If cap is not -1, this is for an array of cap elements of type t.

func (s *mspan) userArenaNextFree(typ *_type, cap int) unsafe.Pointer

usesLibcall function #

usesLibcall indicates whether this runtime performs system calls via libcall.

func usesLibcall() bool

usleep function #

go:nosplit go:cgo_unsafe_args

func usleep(usec uint32)

usleep function #

go:nosplit

func usleep(µs uint32)

usleep function #

func usleep(usec uint32)

usleep function #

func usleep(usec uint32)

usleep function #

go:nosplit go:cgo_unsafe_args

func usleep(usec uint32)

usleep function #

go:nosplit

func usleep(us uint32)

usleep function #

go:nosplit

func usleep(µs uint32)

usleep function #

func usleep(usec uint32)

usleep function #

go:nosplit

func usleep(us uint32)

usleep function #

func usleep(usec uint32)

usleep1 function #

func usleep1(usec uint32)

usleep1 function #

func usleep1(us uint32)

usleep_no_g function #

go:nosplit go:cgo_unsafe_args

func usleep_no_g(usec uint32)

usleep_no_g function #

go:nosplit

func usleep_no_g(usec uint32)

usleep_no_g function #

go:nosplit

func usleep_no_g(µs uint32)

usleep_no_g function #

go:nosplit go:cgo_unsafe_args

func usleep_no_g(usec uint32)

usleep_no_g function #

go:nosplit

func usleep_no_g(us uint32)

usleep_no_g function #

go:nosplit

func usleep_no_g(usec uint32)

usleep_no_g function #

go:nosplit

func usleep_no_g(us uint32)

usleep_no_g function #

go:nosplit

func usleep_no_g(usec uint32)

usleep_no_g function #

go:nosplit

func usleep_no_g(usec uint32)

usleep_trampoline function #

func usleep_trampoline()

usleep_trampoline function #

func usleep_trampoline()

usplit function #

for testing

func usplit(x uint32) (q uint32, r uint32)

usplitR0 function #

Called from assembly only; declared for go vet.

func usplitR0()

uvarint method #

func (r *debugLogReader) uvarint() uint64

uvarint method #

go:nosplit

func (l *debugLogWriter) uvarint(u uint64)

valid method #

func (i *specialsIter) valid() bool

valid method #

func (f funcInfo) valid() bool

valid method #

func (uf inlineFrame) valid() bool

valid method #

func (u *unwinder) valid() bool

validSIGPROF function #

go:nosplit

func validSIGPROF(mp *m, c *sigctxt) bool

validSIGPROF function #

go:nosplit

func validSIGPROF(mp *m, c *sigctxt) bool

validSIGPROF function #

go:nosplit

func validSIGPROF(mp *m, c *sigctxt) bool

validSIGPROF function #

go:nosplit

func validSIGPROF(mp *m, c *sigctxt) bool

validSIGPROF function #

go:nosplit

func validSIGPROF(mp *m, c *sigctxt) bool

validSIGPROF function #

go:nosplit

func validSIGPROF(mp *m, c *sigctxt) bool

validSIGPROF function #

validSIGPROF compares this signal delivery's code against the signal sources that the profiler uses, returning whether the delivery should be processed. To be processed, a signal delivery from a known profiling mechanism should correspond to the best profiling mechanism available to this thread. Signals from other sources are always considered valid. go:nosplit

func validSIGPROF(mp *m, c *sigctxt) bool

validSIGPROF function #

go:nosplit

func validSIGPROF(mp *m, c *sigctxt) bool

values function #

values for implementing maps.values go:linkname values maps.values

func values(m any, p unsafe.Pointer)

values function #

values for implementing maps.values go:linkname values maps.values

func values(m any, p unsafe.Pointer)

varint method #

varint appends v to buf in little-endian-base-128 encoding. nosplit because it's part of writing an event for an M, which must not have any stack growth. go:nosplit

func (buf *traceBuf) varint(v uint64)

varint method #

func (r *debugLogReader) varint() int64

varint method #

go:nosplit

func (l *debugLogWriter) varint(x int64)

varintAt method #

varintAt writes varint v at byte position pos in buf. This always consumes traceBytesPerNumber bytes. This is intended for when the caller needs to reserve space for a varint but can't populate it until later. Use varintReserve to reserve this space. nosplit because it's part of writing an event for an M, which must not have any stack growth. go:nosplit

func (buf *traceBuf) varintAt(pos int, v uint64)

varintReserve method #

varintReserve reserves enough space in buf to hold any varint. Space reserved this way can be filled in with the varintAt method. nosplit because it's part of writing an event for an M, which must not have any stack growth. go:nosplit

func (buf *traceBuf) varintReserve() int

vdsoCall function #

func vdsoCall()

vdsoClockGettime function #

go:nosplit

func vdsoClockGettime(clockID int32) bintime

vdsoFindVersion function #

func vdsoFindVersion(info *vdsoInfo, ver *vdsoVersionKey) int32

vdsoInitFromSysinfoEhdr function #

func vdsoInitFromSysinfoEhdr(info *vdsoInfo, hdr *elfEhdr)

vdsoParseSymbols function #

func vdsoParseSymbols(info *vdsoInfo, version int32)

vdsoauxv function #

func vdsoauxv(tag uintptr, val uintptr)

vdsoauxv function #

func vdsoauxv(tag uintptr, val uintptr)

verify method #

verifyTimerHeap verifies that the timer heap is in a valid state. This is only for debugging, and is only called if verifyTimers is true. The caller must have locked ts.

func (ts *timers) verify()

vgetrandom function #

go:linkname vgetrandom

func vgetrandom(p []byte, flags uint32) (ret int, supported bool)

vgetrandom function #

This is exported for use in internal/syscall/unix as well as x/sys/unix. go:linkname vgetrandom

func vgetrandom(p []byte, flags uint32) (ret int, supported bool)

vgetrandom1 function #

go:noescape

func vgetrandom1(buf *byte, length uintptr, flags uint32, state uintptr, stateSize uintptr) int

vgetrandomDestroy function #

func vgetrandomDestroy(mp *m)

vgetrandomDestroy function #

Free vgetrandom state from the M (if any) prior to destroying the M. This may allocate, so it must have a P.

func vgetrandomDestroy(mp *m)

vgetrandomGetState function #

func vgetrandomGetState() uintptr

vgetrandomInit function #

func vgetrandomInit()

vgetrandomInit function #

func vgetrandomInit()

wake method #

wake immediately unparks the scavenger if necessary. Safe to run without a P.

func (s *scavengerState) wake()

wake method #

wake awakens any goroutine sleeping on the timer. Safe for concurrent use with all other methods.

func (s *wakeableSleep) wake()

wakeNetPoller function #

wakeNetPoller wakes up the thread sleeping in the network poller if it isn't going to wake up before the when argument; or it wakes an idle P to service timers and the network poller if there isn't one already.

func wakeNetPoller(when int64)

wakeNetpoll function #

func wakeNetpoll(_ int32)

wakeNetpoll function #

func wakeNetpoll(kq int32)

wakeTime method #

wakeTime looks at ts's timers and returns the time when we should wake up the netpoller. It returns 0 if there are no timers. This function is invoked when dropping a P, so it must run without any write barriers. go:nowritebarrierrec

func (ts *timers) wakeTime() int64

wakefing function #

func wakefing() *g

wakep function #

Tries to add one more P to execute G's. Called when a G is made runnable (newproc, ready). Must be called with a P. wakep should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - gvisor.dev/gvisor Do not remove or change the type signature. See go.dev/issue/67401. go:linkname wakep

func wakep()

wakeupExtra method #

wakeupExtra must be called after setting one of the "extra" atomic fields b.overflow or b.eof. It records the change in b.w and wakes up the reader if needed.

func (b *profBuf) wakeupExtra()

walltime function #

go:wasmimport gojs runtime.walltime

func walltime() (sec int64, nsec int32)

walltime function #

func walltime() (sec int64, nsec int32)

walltime function #

func walltime() (sec int64, nsec int32)

walltime function #

func walltime() (sec int64, nsec int32)

walltime function #

func walltime() (sec int64, nsec int32)

walltime function #

func walltime() (sec int64, nsec int32)

walltime function #

walltime should be an internal detail, but widely used packages access it using linkname. Notable members of the hall of shame include: - gitee.com/quant1x/gox Do not remove or change the type signature. See go.dev/issue/67401. go:linkname walltime go:nosplit go:cgo_unsafe_args

func walltime() (int64, int32)

walltime function #

go:nosplit

func walltime() (int64, int32)

walltime1 function #

func walltime1() (sec int64, nsec int32)

walltime_trampoline function #

func walltime_trampoline()

wantAsyncPreempt function #

wantAsyncPreempt returns whether an asynchronous preemption is queued for gp.

func wantAsyncPreempt(gp *g) bool

wasmDiv function #

func wasmDiv()

wasmExit function #

go:wasmimport gojs runtime.wasmExit

func wasmExit(code int32)

wasmTruncS function #

func wasmTruncS()

wasmTruncU function #

func wasmTruncU()

wasmWrite function #

go:wasmimport gojs runtime.wasmWrite go:noescape

func wasmWrite(fd uintptr, p unsafe.Pointer, n int32)

wbBufFlush function #

wbBufFlush flushes the current P's write barrier buffer to the GC workbufs. This must not have write barriers because it is part of the write barrier implementation. This and everything it calls must be nosplit because 1) the stack contains untyped slots from gcWriteBarrier and 2) there must not be a GC safe point between the write barrier test in the caller and flushing the buffer. TODO: A "go:nosplitrec" annotation would be perfect for this. go:nowritebarrierrec go:nosplit

func wbBufFlush()

wbBufFlush1 function #

wbBufFlush1 flushes p's write barrier buffer to the GC work queue. This must not have write barriers because it is part of the write barrier implementation, so this may lead to infinite loops or buffer corruption. This must be non-preemptible because it uses the P's workbuf. go:nowritebarrierrec go:systemstack

func wbBufFlush1(pp *p)

wbMove function #

wbMove performs the write barrier operations necessary before copying a region of memory from src to dst of type typ. Does not actually do the copying. go:nowritebarrierrec go:nosplit

func wbMove(typ *_type, dst unsafe.Pointer, src unsafe.Pointer)

wbZero function #

wbZero performs the write barrier operations necessary before zeroing a region of memory at address dst of type typ. Does not actually do the zeroing. go:nowritebarrierrec go:nosplit

func wbZero(typ *_type, dst unsafe.Pointer)

windowsFindfunc function #

func windowsFindfunc(lib uintptr, name []byte) stdFunction

windowsLoadSystemLib function #

func windowsLoadSystemLib(name []uint16) uintptr

windows_GetSystemDirectory function #

go:linkname windows_GetSystemDirectory internal/syscall/windows.GetSystemDirectory

func windows_GetSystemDirectory() string

windows_QueryPerformanceCounter function #

go:linkname windows_QueryPerformanceCounter internal/syscall/windows.QueryPerformanceCounter

func windows_QueryPerformanceCounter() int64

windows_QueryPerformanceFrequency function #

go:linkname windows_QueryPerformanceFrequency internal/syscall/windows.QueryPerformanceFrequency

func windows_QueryPerformanceFrequency() int64

winthrow function #

Always called on g0. gp is the G where the exception occurred. go:nosplit

func winthrow(info *exceptionrecord, r *context, gp *g)

wintls function #

Init-time helper

func wintls()

wirep function #

wirep is the first step of acquirep, which actually associates the current M to pp. This is broken out so we can disallow write barriers for this part, since we don't yet have a P. go:nowritebarrierrec go:nosplit

func wirep(pp *p)

worldStarted function #

go:nosplit

func worldStarted()

worldStarted function #

worldStarted notes that the world is starting. Caller must hold worldsema. nosplit to ensure it can be called in as many contexts as possible. go:nosplit

func worldStarted()

worldStopped function #

worldStopped notes that the world is stopped. Caller must hold worldsema. nosplit to ensure it can be called in as many contexts as possible. go:nosplit

func worldStopped()

worldStopped function #

go:nosplit

func worldStopped()

write method #

write appends the pointerness of the next valid pointer slots using the low valid bits of bits. 1=pointer, 0=scalar.

func (h writeUserArenaHeapBits) write(s *mspan, bits uintptr, valid uintptr) writeUserArenaHeapBits

write function #

write must be nosplit on Windows (see write1) go:nosplit

func write(fd uintptr, p unsafe.Pointer, n int32) int32

write function #

write is like the Unix write system call. We have to avoid write barriers to avoid potential deadlock on write calls. go:nowritebarrierrec

func write(fd uintptr, p unsafe.Pointer, n int32) int32

write method #

Write to b cnt bits starting at bit 0 of data. Requires cnt>0.

func (b bitCursor) write(data *byte, cnt uintptr)

write method #

write dumps the histogram to the passed metricValue as a float64 histogram.

func (h *timeHistogram) write(out *metricValue)

write method #

write writes an entry to the profiling buffer b. The entry begins with a fixed hdr, which must have length b.hdrsize, followed by a variable-sized stack and a single tag pointer *tagPtr (or nil if tagPtr is nil). No write barriers allowed because this might be called from a signal handler.

func (b *profBuf) write(tagPtr *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr)

write1 function #

write1 must be nosplit because it's used as a last resort in functions like badmorestackg0. In such cases, we'll always take the ASCII path. go:nosplit

func write1(fd uintptr, buf unsafe.Pointer, n int32) int32

write1 function #

func write1(fd uintptr, p unsafe.Pointer, n int32) int32

write1 function #

func write1(fd uintptr, p unsafe.Pointer, n int32) int32

write1 function #

go:nosplit go:cgo_unsafe_args

func write1(fd uintptr, p unsafe.Pointer, n int32) int32

write1 function #

go:nosplit

func write1(fd uintptr, buf unsafe.Pointer, nbyte int32) int32

write1 function #

go:nosplit go:cgo_unsafe_args

func write1(fd uintptr, p unsafe.Pointer, n int32) int32

write1 function #

go:nosplit

func write1(fd uintptr, p unsafe.Pointer, n int32) int32

write1 function #

write1 calls the write system call. It returns a non-negative number of bytes written or a negative errno value. go:noescape

func write1(fd uintptr, p unsafe.Pointer, n int32) int32

write1 function #

go:nosplit

func write1(fd uintptr, buf unsafe.Pointer, n int32) int32

write1 function #

write1 calls the write system call. It returns a non-negative number of bytes written or a negative errno value. go:noescape

func write1(fd uintptr, p unsafe.Pointer, n int32) int32

write2 function #

func write2(fd uintptr, p uintptr, n int32) int32

writeConsole function #

writeConsole writes bufLen bytes from buf to the console File. It returns the number of bytes written.

func writeConsole(handle uintptr, buf unsafe.Pointer, bufLen int32) int

writeConsoleUTF16 function #

writeConsoleUTF16 is the dedicated Windows call that correctly prints to the console regardless of the current code page. Input is utf-16 code points. The handle must be a console handle.

func writeConsoleUTF16(handle uintptr, b []uint16)

writeErr function #

func writeErr(b []byte)

writeErr function #

go:nosplit

func writeErr(b []byte)

writeErrData function #

writeErrData is the common parts of writeErr{,Str}. go:nosplit

func writeErrData(data *byte, n int32)

writeErrStr function #

writeErrStr writes a string to descriptor 2. If SetCrashOutput(f) was called, it also writes to f. go:nosplit

func writeErrStr(s string)

writeFrameAt method #

go:nosplit

func (l *debugLogWriter) writeFrameAt(pos uint64, size uint64) bool

writeGoStatus method #

writeGoStatus emits a GoStatus event as well as any active ranges on the goroutine. nosplit because it's part of writing an event for an M, which must not have any stack growth. go:nosplit

func (w traceWriter) writeGoStatus(goid uint64, mid int64, status traceGoStatus, markAssist bool, stackID uint64) traceWriter

writeHeapBitsSmall method #

writeHeapBitsSmall writes the heap bits for small objects whose ptr/scalar data is stored as a bitmap at the end of the span. Assumes dataSize is <= ptrBits*goarch.PtrSize. x must be a pointer into the span. heapBitsInSpan(dataSize) must be true. dataSize must be >= typ.Size_. go:nosplit

func (span *mspan) writeHeapBitsSmall(x uintptr, dataSize uintptr, typ *_type) (scanSize uintptr)

writeLogdHeader function #

writeLogdHeader populates the header and returns the length of the payload.

func writeLogdHeader() int

writeProcStatus method #

writeProcStatus emits a ProcStatus event with all the provided information. The caller must have taken ownership of a P's status writing, and the P must be prevented from transitioning. nosplit because it's part of writing an event for an M, which must not have any stack growth. go:nosplit

func (w traceWriter) writeProcStatus(pid uint64, status traceProcStatus, inSweep bool) traceWriter

writeProcStatusForP method #

writeProcStatusForP emits a ProcStatus event for the provided p based on its status. The caller must fully own pp and it must be prevented from transitioning (e.g. this can be called by a forEachP callback or from a STW). nosplit because it's part of writing an event for an M, which must not have any stack growth. go:nosplit

func (w traceWriter) writeProcStatusForP(pp *p, inSTW bool) traceWriter

writeString method #

writeString writes the string to t.buf. Must run on the systemstack because it acquires t.lock. go:systemstack

func (t *traceStringTable) writeString(gen uintptr, id uint64, s string)

writeSync method #

go:nosplit

func (l *debugLogWriter) writeSync(tick uint64, nano uint64)

writeUint64LE method #

go:nosplit

func (l *debugLogWriter) writeUint64LE(x uint64)

writeUserArenaHeapBits method #

func (s *mspan) writeUserArenaHeapBits(addr uintptr) (h writeUserArenaHeapBits)

write_trampoline function #

func write_trampoline()

write_trampoline function #

func write_trampoline()

writeheapdump_m function #

func writeheapdump_m(fd uintptr, m *MemStats)

writer method #

writer returns a traceWriter that writes into the current M's stream. Once this is called, the caller must guard against stack growth until end is called on it. Therefore, it's highly recommended to use this API in a "fluent" style, for example tl.writer().event(...).end(). Better yet, callers just looking to write events should use eventWriter when possible, which is a much safer wrapper around this function. nosplit to allow for safe reentrant tracing from stack growth paths. go:nosplit

func (tl traceLocker) writer() traceWriter

xer method #

func (c *sigctxt) xer() uint64

xer method #

func (c *sigctxt) xer() uint32

xer method #

func (c *sigctxt) xer() uint64

Generated with Arrow